From f0575cc39abd1f8b2a41289e6665d6cef18c39e1 Mon Sep 17 00:00:00 2001
From: Lewis Kang
Date: Tue, 19 Apr 2016 18:23:17 +0800
Subject: [PATCH 1/6] port platform-accton-as7716_32x-device-drivers.patch from ONL1.0 & patch for as7716 management port's driver

---
 .../patches/driver-broadcom-tigon3.patch | 27389 ++++++++++++++++
 .../patches/mgmt-port-init-config.patch | 50 +
 ...orm-accton-as7716_32x-device-drivers.patch | 1707 +
 .../kernels/3.2.65-1+deb7u2/patches/series | 3 +
 4 files changed, 29149 insertions(+)
 create mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/driver-broadcom-tigon3.patch
 create mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/mgmt-port-init-config.patch
 create mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch

diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/driver-broadcom-tigon3.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/driver-broadcom-tigon3.patch
new file mode 100644
index 00000000..ffff1d30
--- /dev/null
+++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/driver-broadcom-tigon3.patch
@@ -0,0 +1,27389 @@
+support Broadcom Tigon3 Ethernet driver
+
+diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
+index b789605..486c71c 100644
+--- a/drivers/net/ethernet/broadcom/Makefile
++++ b/drivers/net/ethernet/broadcom/Makefile
+@@ -8,4 +8,4 @@ obj-$(CONFIG_BNX2) += bnx2.o
+ obj-$(CONFIG_CNIC) += cnic.o
+ obj-$(CONFIG_BNX2X) += bnx2x/
+ obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
+-obj-$(CONFIG_TIGON3) += tg3.o
++obj-$(CONFIG_TIGON3) += tg3/
+diff --git a/drivers/net/ethernet/broadcom/tg3/Makefile b/drivers/net/ethernet/broadcom/tg3/Makefile
+new file mode 100644
+index 0000000..22b6141
+--- /dev/null
++++ b/drivers/net/ethernet/broadcom/tg3/Makefile
+@@ -0,0 +1,5 @@
++#
++# Makefile for Broadcom Tigon3 ethernet driver
++#
++
++obj-$(CONFIG_TIGON3) += tg3.o
+diff --git a/drivers/net/ethernet/broadcom/tg3/tg3.c b/drivers/net/ethernet/broadcom/tg3/tg3.c
+new file mode 100644
+index 0000000..4894a11
+--- /dev/null
++++ b/drivers/net/ethernet/broadcom/tg3/tg3.c
+@@ -0,0 +1,19937 @@
++/*
++ * tg3.c: Broadcom Tigon3 ethernet driver.
++ *
++ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
++ * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
++ * Copyright (C) 2004 Sun Microsystems Inc.
++ * Copyright (C) 2005-2015 Broadcom Corporation.
++ * Portions Copyright (C) VMware, Inc. 2007-2010. All Rights Reserved.
++ *
++ * Firmware is:
++ * Derived from proprietary unpublished source code,
++ * Copyright (C) 2000-2003 Broadcom Corporation.
++ *
++ * Permission is hereby granted for the distribution of this firmware
++ * data in hexadecimal or equivalent format, provided this copyright
++ * notice is accompanying it.
++ */
++
++#include "tg3_flags.h"
++
++#include 
++
++#if (LINUX_VERSION_CODE < 0x020612)
++#include 
++#endif
++
++#if (LINUX_VERSION_CODE < 0x020500)
++#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! 
defined(MODVERSIONS) ++#define MODVERSIONS ++#include ++#endif ++#endif ++#include ++#if (LINUX_VERSION_CODE >= 0x20600) ++#include ++#endif ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#ifdef BCM_HAS_MDIO_H ++#include ++#endif ++#include ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++#include ++#include ++#include ++#endif ++#include ++#include ++#include ++#if (LINUX_VERSION_CODE >= 0x20600) ++#include ++#endif ++#include ++#if (LINUX_VERSION_CODE >= 0x020600) ++#include ++#endif ++#ifdef BCM_HAS_REQUEST_FIRMWARE ++#include ++#else ++#include "tg3_firmware.h" ++#endif ++#include ++ ++#ifndef IS_ENABLED ++#define __ARG_PLACEHOLDER_1 0, ++#define config_enabled(cfg) _config_enabled(cfg) ++#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) ++#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) ++#define ___config_enabled(__ignored, val, ...) val ++ ++#define IS_ENABLED(option) \ ++ (config_enabled(option) || config_enabled(option##_MODULE)) ++#endif ++ ++#if IS_ENABLED(CONFIG_HWMON) && !defined(__VMKLNX__) ++#include ++#include ++#endif ++ ++#include ++#include ++ ++#include ++#include ++#include ++ ++#ifdef BCM_HAS_IEEE1588_SUPPORT ++#include ++#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) ++#include ++#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ ++#include ++#include ++#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ ++#endif ++ ++#ifdef CONFIG_SPARC ++#include ++#include ++#endif ++ ++#define BAR_0 0 ++#define BAR_2 2 ++ ++#include "tg3.h" ++ ++/* Functions & macros to verify TG3_FLAGS types */ ++ ++static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits) ++{ ++ return test_bit(flag, bits); ++} ++ ++static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits) ++{ ++ set_bit(flag, bits); ++} ++ ++static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) ++{ ++ clear_bit(flag, bits); ++} ++ ++#define tg3_flag(tp, flag) \ ++ _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags) ++#define tg3_flag_set(tp, flag) \ ++ _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags) ++#define tg3_flag_clear(tp, flag) \ ++ _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags) ++ ++#define DRV_MODULE_NAME "tg3" ++#define TG3_MAJ_NUM 3 ++#define TG3_MIN_NUM 137 ++#define TG3_REVISION "k" ++#define DRV_MODULE_VERSION \ ++ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)\ ++ TG3_REVISION ++#define DRV_MODULE_RELDATE "April 1, 2015" ++#define RESET_KIND_SHUTDOWN 0 ++#define RESET_KIND_INIT 1 ++#define RESET_KIND_SUSPEND 2 ++ ++#define TG3_DEF_RX_MODE 0 ++#define TG3_DEF_TX_MODE 0 ++#define TG3_DEF_MSG_ENABLE \ ++ (NETIF_MSG_DRV | \ ++ NETIF_MSG_PROBE | \ ++ NETIF_MSG_LINK | \ ++ NETIF_MSG_TIMER | \ ++ NETIF_MSG_IFDOWN | \ ++ NETIF_MSG_IFUP | \ ++ NETIF_MSG_RX_ERR | \ ++ NETIF_MSG_TX_ERR) ++ ++#define TG3_GRC_LCLCTL_PWRSW_DELAY 100 ++ ++/* length of time before we decide the hardware is borked, ++ * and dev->tx_timeout() should be called to fix the problem ++ */ ++#if defined(__VMKLNX__) ++/* On VMware ESX there is a possibility that that netdev watchdog thread ++ * runs before the reset task if the machine is loaded. 
If this occurs ++ * too many times, these premature watchdog triggers will cause a PSOD ++ * on a VMware ESX beta build */ ++#define TG3_TX_TIMEOUT (20 * HZ) ++#else ++#define TG3_TX_TIMEOUT (5 * HZ) ++#endif /* defined(__VMKLNX__) */ ++ ++/* hardware minimum and maximum for a single frame's data payload */ ++#define TG3_MIN_MTU 60 ++#define TG3_MAX_MTU(tp) \ ++ (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500) ++ ++/* These numbers seem to be hard coded in the NIC firmware somehow. ++ * You can't change the ring sizes, but you can change where you place ++ * them in the NIC onboard memory. ++ */ ++#define TG3_RX_STD_RING_SIZE(tp) \ ++ (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ ++ TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700) ++#define TG3_RX_JMB_RING_SIZE(tp) \ ++ (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ ++ TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700) ++ ++#if defined(__VMKLNX__) ++#define TG3_DEF_RX_RING_PENDING 255 ++#define TG3_DEF_RX_JUMBO_RING_PENDING 200 ++#else ++#define TG3_DEF_RX_RING_PENDING 200 ++#define TG3_DEF_RX_JUMBO_RING_PENDING 100 ++#endif ++ ++/* Do not place this n-ring entries value into the tp struct itself, ++ * we really want to expose these constants to GCC so that modulo et ++ * al. operations are done with shifts and masks instead of with ++ * hw multiply/modulo instructions. Another solution would be to ++ * replace things like '% foo' with '& (foo - 1)'. ++ */ ++ ++#define TG3_TX_RING_SIZE 512 ++#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1) ++ ++#define TG3_RX_STD_RING_BYTES(tp) \ ++ (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp)) ++#define TG3_RX_JMB_RING_BYTES(tp) \ ++ (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp)) ++#define TG3_RX_RCB_RING_BYTES(tp) \ ++ (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1)) ++#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ ++ TG3_TX_RING_SIZE) ++#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) ++ ++#define TG3_DMA_BYTE_ENAB 64 ++ ++#define TG3_RX_STD_DMA_SZ 1536 ++#define TG3_RX_JMB_DMA_SZ 9046 ++ ++#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB) ++ ++#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) ++#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) ++ ++#define TG3_RX_STD_BUFF_RING_SIZE(tp) \ ++ (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp)) ++ ++#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \ ++ (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp)) ++ ++/* Due to a hardware bug, the 5701 can only DMA to memory addresses ++ * that are at least dword aligned when used in PCIX mode. The driver ++ * works around this bug by double copying the packet. This workaround ++ * is built into the normal double copy length check for efficiency. ++ * ++ * However, the double copy is only necessary on those architectures ++ * where unaligned memory accesses are inefficient. For those architectures ++ * where unaligned memory accesses incur little penalty, we can reintegrate ++ * the 5701 in the normal rx path. Doing so saves a device structure ++ * dereference by hardcoding the double copy threshold in place. 
++ */ ++#define TG3_RX_COPY_THRESHOLD 256 ++#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ++ #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD ++#else ++ #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh) ++#endif ++ ++#if (NET_IP_ALIGN != 0) ++#define TG3_RX_OFFSET(tp) ((tp)->rx_offset) ++#else ++#ifdef BCM_HAS_BUILD_SKB ++#define TG3_RX_OFFSET(tp) (NET_SKB_PAD) ++#else ++#define TG3_RX_OFFSET(tp) 0 ++#endif ++#endif ++ ++/* minimum number of free TX descriptors required to wake up TX process */ ++#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) ++#define TG3_TX_BD_DMA_MAX_2K 2048 ++#define TG3_TX_BD_DMA_MAX_4K 4096 ++#define TG3_TX_BD_DMA_MAX_32K 32768 ++ ++#define TG3_RAW_IP_ALIGN 2 ++ ++#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3) ++#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1) ++ ++#include "tg3_compat2.h" ++ ++#if defined(__VMKLNX__) ++/* see pr141646, 626764*/ ++#define TG3_FW_UPDATE_TIMEOUT_SEC 30 ++#else ++#define TG3_FW_UPDATE_TIMEOUT_SEC 5 ++#endif ++#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2) ++ ++#define FIRMWARE_TG3 "tigon/tg3.bin" ++#define FIRMWARE_TG357766 "tigon/tg357766.bin" ++#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" ++#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" ++ ++static char version[] __devinitdata = ++ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; ++ ++MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); ++MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); ++MODULE_LICENSE("GPL"); ++MODULE_VERSION(DRV_MODULE_VERSION); ++MODULE_FIRMWARE(FIRMWARE_TG3); ++MODULE_FIRMWARE(FIRMWARE_TG3TSO); ++MODULE_FIRMWARE(FIRMWARE_TG3TSO5); ++ ++static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ ++#if (LINUX_VERSION_CODE >= 0x20600) ++module_param(tg3_debug, int, 0); ++MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); ++#endif ++#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION >= 55000) ++static int disable_fw_dmp; ++module_param(disable_fw_dmp, int, 0); ++MODULE_PARM_DESC(disable_fw_dmp, "For debugging purposes, disable firmware " ++ "dump feature when set to value of 1"); ++#endif ++ ++static int tg3_disable_eee = -1; ++#if (LINUX_VERSION_CODE >= 0x20600) ++module_param(tg3_disable_eee, int, 0); ++MODULE_PARM_DESC(tg3_disable_eee, "Disable Energy Efficient Ethernet (EEE) support"); ++#endif ++ ++#define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001 ++#define TG3_DRV_DATA_FLAG_5705_10_100 0x0002 ++ ++static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 
PCI_DEVICE_ID_TIGON3_5702A3)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901), ++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY | ++ TG3_DRV_DATA_FLAG_5705_10_100}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2), ++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY | ++ TG3_DRV_DATA_FLAG_5705_10_100}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F), ++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY | ++ TG3_DRV_DATA_FLAG_5705_10_100}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F), ++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F), ++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)}, ++ {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M, ++ PCI_VENDOR_ID_LENOVO, ++ TG3PCI_SUBDEVICE_ID_LENOVO_5787M), ++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F), ++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 
TG3PCI_DEVICE_TIGON3_5761S)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)}, ++ {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780, ++ PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A), ++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, ++ {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780, ++ PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B), ++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790), ++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791), ++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795), ++ .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)}, ++ {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)}, ++ {} ++}; ++ ++MODULE_DEVICE_TABLE(pci, tg3_pci_tbl); ++ ++static const struct { ++ const char string[ETH_GSTRING_LEN]; ++} ethtool_stats_keys[] = { ++ { "rx_octets" }, ++ { "rx_fragments" }, ++ { "rx_ucast_packets" }, ++ { "rx_mcast_packets" }, ++ { "rx_bcast_packets" }, ++ { "rx_fcs_errors" }, ++ { "rx_align_errors" }, ++ { "rx_xon_pause_rcvd" }, ++ { "rx_xoff_pause_rcvd" }, ++ { "rx_mac_ctrl_rcvd" }, ++ { "rx_xoff_entered" }, ++ { "rx_frame_too_long_errors" }, ++ { "rx_jabbers" }, ++ { "rx_undersize_packets" }, ++ { "rx_in_length_errors" }, ++ { "rx_out_length_errors" }, ++ { 
"rx_64_or_less_octet_packets" }, ++ { "rx_65_to_127_octet_packets" }, ++ { "rx_128_to_255_octet_packets" }, ++ { "rx_256_to_511_octet_packets" }, ++ { "rx_512_to_1023_octet_packets" }, ++ { "rx_1024_to_1522_octet_packets" }, ++ { "rx_1523_to_2047_octet_packets" }, ++ { "rx_2048_to_4095_octet_packets" }, ++ { "rx_4096_to_8191_octet_packets" }, ++ { "rx_8192_to_9022_octet_packets" }, ++ ++ { "tx_octets" }, ++ { "tx_collisions" }, ++ ++ { "tx_xon_sent" }, ++ { "tx_xoff_sent" }, ++ { "tx_flow_control" }, ++ { "tx_mac_errors" }, ++ { "tx_single_collisions" }, ++ { "tx_mult_collisions" }, ++ { "tx_deferred" }, ++ { "tx_excessive_collisions" }, ++ { "tx_late_collisions" }, ++ { "tx_collide_2times" }, ++ { "tx_collide_3times" }, ++ { "tx_collide_4times" }, ++ { "tx_collide_5times" }, ++ { "tx_collide_6times" }, ++ { "tx_collide_7times" }, ++ { "tx_collide_8times" }, ++ { "tx_collide_9times" }, ++ { "tx_collide_10times" }, ++ { "tx_collide_11times" }, ++ { "tx_collide_12times" }, ++ { "tx_collide_13times" }, ++ { "tx_collide_14times" }, ++ { "tx_collide_15times" }, ++ { "tx_ucast_packets" }, ++ { "tx_mcast_packets" }, ++ { "tx_bcast_packets" }, ++ { "tx_carrier_sense_errors" }, ++ { "tx_discards" }, ++ { "tx_errors" }, ++ ++ { "dma_writeq_full" }, ++ { "dma_write_prioq_full" }, ++ { "rxbds_empty" }, ++ { "rx_discards" }, ++ { "rx_errors" }, ++ { "rx_threshold_hit" }, ++ ++ { "dma_readq_full" }, ++ { "dma_read_prioq_full" }, ++ { "tx_comp_queue_full" }, ++ ++ { "ring_set_send_prod_index" }, ++ { "ring_status_update" }, ++ { "nic_irqs" }, ++ { "nic_avoided_irqs" }, ++ { "nic_tx_threshold_hit" }, ++ ++ { "mbuf_lwm_thresh_hit" }, ++ { "dma_4g_cross" }, ++#if !defined(__VMKLNX__) ++ { "recoverable_err" }, ++ { "unrecoverable_err" }, ++#endif ++}; ++ ++#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) ++#define TG3_NVRAM_TEST 0 ++#define TG3_LINK_TEST 1 ++#define TG3_REGISTER_TEST 2 ++#define TG3_MEMORY_TEST 3 ++#define TG3_MAC_LOOPB_TEST 4 ++#define TG3_PHY_LOOPB_TEST 5 ++#define TG3_EXT_LOOPB_TEST 6 ++#define TG3_INTERRUPT_TEST 7 ++ ++ ++static const struct { ++ const char string[ETH_GSTRING_LEN]; ++} ethtool_test_keys[] = { ++ [TG3_NVRAM_TEST] = { "nvram test (online) " }, ++ [TG3_LINK_TEST] = { "link test (online) " }, ++ [TG3_REGISTER_TEST] = { "register test (offline)" }, ++ [TG3_MEMORY_TEST] = { "memory test (offline)" }, ++ [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" }, ++ [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" }, ++ [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" }, ++ [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" }, ++}; ++ ++#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) ++ ++ ++static void tg3_write32(struct tg3 *tp, u32 off, u32 val) ++{ ++ writel(val, tp->regs + off); ++} ++ ++static u32 tg3_read32(struct tg3 *tp, u32 off) ++{ ++ return readl(tp->regs + off); ++} ++ ++static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) ++{ ++ writel(val, tp->aperegs + off); ++} ++ ++static u32 tg3_ape_read32(struct tg3 *tp, u32 off) ++{ ++ return readl(tp->aperegs + off); ++} ++ ++static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&tp->indirect_lock, flags); ++ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); ++ pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); ++ spin_unlock_irqrestore(&tp->indirect_lock, flags); ++} ++ ++static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) ++{ ++ writel(val, tp->regs + off); ++ readl(tp->regs + off); ++} ++ 
++static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) ++{ ++ unsigned long flags; ++ u32 val; ++ ++ spin_lock_irqsave(&tp->indirect_lock, flags); ++ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); ++ pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); ++ spin_unlock_irqrestore(&tp->indirect_lock, flags); ++ return val; ++} ++ ++static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) ++{ ++ unsigned long flags; ++ ++ if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { ++ pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX + ++ TG3_64BIT_REG_LOW, val); ++ return; ++ } ++ if (off == TG3_RX_STD_PROD_IDX_REG) { ++ pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + ++ TG3_64BIT_REG_LOW, val); ++ return; ++ } ++ ++ spin_lock_irqsave(&tp->indirect_lock, flags); ++ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); ++ pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); ++ spin_unlock_irqrestore(&tp->indirect_lock, flags); ++ ++ /* In indirect mode when disabling interrupts, we also need ++ * to clear the interrupt bit in the GRC local ctrl register. ++ */ ++ if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) && ++ (val == 0x1)) { ++ pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL, ++ tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT); ++ } ++} ++ ++static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off) ++{ ++ unsigned long flags; ++ u32 val; ++ ++ spin_lock_irqsave(&tp->indirect_lock, flags); ++ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); ++ pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); ++ spin_unlock_irqrestore(&tp->indirect_lock, flags); ++ return val; ++} ++ ++/* usec_wait specifies the wait time in usec when writing to certain registers ++ * where it is unsafe to read back the register without some delay. ++ * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power. ++ * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed. ++ */ ++static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) ++{ ++ if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) ++ /* Non-posted methods */ ++ tp->write32(tp, off, val); ++ else { ++ /* Posted method */ ++ tg3_write32(tp, off, val); ++ if (usec_wait) ++ udelay(usec_wait); ++ tp->read32(tp, off); ++ } ++ /* Wait again after the read for the posted method to guarantee that ++ * the wait time is met. 
++ */ ++ if (usec_wait) ++ udelay(usec_wait); ++} ++ ++static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) ++{ ++ tp->write32_mbox(tp, off, val); ++ if (tg3_flag(tp, FLUSH_POSTED_WRITES) || ++ (!tg3_flag(tp, MBOX_WRITE_REORDER) && ++ !tg3_flag(tp, ICH_WORKAROUND))) ++ tp->read32_mbox(tp, off); ++} ++ ++static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) ++{ ++ void __iomem *mbox = tp->regs + off; ++ writel(val, mbox); ++ if (tg3_flag(tp, TXD_MBOX_HWBUG)) ++ writel(val, mbox); ++ if (tg3_flag(tp, MBOX_WRITE_REORDER) || ++ tg3_flag(tp, FLUSH_POSTED_WRITES)) ++ readl(mbox); ++} ++ ++static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off) ++{ ++ return readl(tp->regs + off + GRCMBOX_BASE); ++} ++ ++static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val) ++{ ++ writel(val, tp->regs + off + GRCMBOX_BASE); ++} ++ ++#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) ++#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) ++#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) ++#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val) ++#define tr32_mailbox(reg) tp->read32_mbox(tp, reg) ++ ++#define tw32(reg, val) tp->write32(tp, reg, val) ++#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0) ++#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us)) ++#define tr32(reg) tp->read32(tp, reg) ++ ++static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) ++{ ++ unsigned long flags; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5906 && ++ (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) ++ return; ++ ++ spin_lock_irqsave(&tp->indirect_lock, flags); ++ if (tg3_flag(tp, SRAM_USE_CONFIG)) { ++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); ++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); ++ ++ /* Always leave this as zero. */ ++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); ++ } else { ++ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); ++ tw32_f(TG3PCI_MEM_WIN_DATA, val); ++ ++ /* Always leave this as zero. */ ++ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); ++ } ++ spin_unlock_irqrestore(&tp->indirect_lock, flags); ++} ++ ++static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) ++{ ++ unsigned long flags; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5906 && ++ (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) { ++ *val = 0; ++ return; ++ } ++ ++ spin_lock_irqsave(&tp->indirect_lock, flags); ++ if (tg3_flag(tp, SRAM_USE_CONFIG)) { ++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); ++ pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); ++ ++ /* Always leave this as zero. */ ++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); ++ } else { ++ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); ++ *val = tr32(TG3PCI_MEM_WIN_DATA); ++ ++ /* Always leave this as zero. */ ++ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); ++ } ++ spin_unlock_irqrestore(&tp->indirect_lock, flags); ++} ++ ++static void tg3_ape_lock_init(struct tg3 *tp) ++{ ++ int i; ++ u32 regbase, bit; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5761) ++ regbase = TG3_APE_LOCK_GRANT; ++ else ++ regbase = TG3_APE_PER_LOCK_GRANT; ++ ++ /* Make sure the driver hasn't any stale locks. 
*/ ++ for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) { ++ switch (i) { ++ case TG3_APE_LOCK_PHY0: ++ case TG3_APE_LOCK_PHY1: ++ case TG3_APE_LOCK_PHY2: ++ case TG3_APE_LOCK_PHY3: ++ bit = APE_LOCK_GRANT_DRIVER; ++ break; ++ default: ++ if (!tp->pci_fn) ++ bit = APE_LOCK_GRANT_DRIVER; ++ else ++ bit = 1 << tp->pci_fn; ++ } ++ tg3_ape_write32(tp, regbase + 4 * i, bit); ++ } ++ ++} ++ ++static int tg3_ape_lock(struct tg3 *tp, int locknum) ++{ ++ int i, off; ++ int ret = 0; ++ u32 status, req, gnt, bit; ++ ++ if (!tg3_flag(tp, ENABLE_APE)) ++ return 0; ++ ++ switch (locknum) { ++ case TG3_APE_LOCK_GPIO: ++ if (tg3_asic_rev(tp) == ASIC_REV_5761) ++ return 0; ++ case TG3_APE_LOCK_GRC: ++ case TG3_APE_LOCK_MEM: ++ if (!tp->pci_fn) ++ bit = APE_LOCK_REQ_DRIVER; ++ else ++ bit = 1 << tp->pci_fn; ++ break; ++ case TG3_APE_LOCK_PHY0: ++ case TG3_APE_LOCK_PHY1: ++ case TG3_APE_LOCK_PHY2: ++ case TG3_APE_LOCK_PHY3: ++ bit = APE_LOCK_REQ_DRIVER; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5761) { ++ req = TG3_APE_LOCK_REQ; ++ gnt = TG3_APE_LOCK_GRANT; ++ } else { ++ req = TG3_APE_PER_LOCK_REQ; ++ gnt = TG3_APE_PER_LOCK_GRANT; ++ } ++ ++ off = 4 * locknum; ++ ++ tg3_ape_write32(tp, req + off, bit); ++ ++ /* Wait for up to 1 millisecond to acquire lock. */ ++ for (i = 0; i < 100; i++) { ++ status = tg3_ape_read32(tp, gnt + off); ++ if (status == bit) ++ break; ++ if (pci_channel_offline(tp->pdev)) ++ break; ++ ++ udelay(10); ++ } ++ ++ if (status != bit) { ++ /* Revoke the lock request. */ ++ tg3_ape_write32(tp, gnt + off, bit); ++ ret = -EBUSY; ++ } ++ ++ return ret; ++} ++ ++static void tg3_ape_unlock(struct tg3 *tp, int locknum) ++{ ++ u32 gnt, bit; ++ ++ if (!tg3_flag(tp, ENABLE_APE)) ++ return; ++ ++ switch (locknum) { ++ case TG3_APE_LOCK_GPIO: ++ if (tg3_asic_rev(tp) == ASIC_REV_5761) ++ return; ++ case TG3_APE_LOCK_GRC: ++ case TG3_APE_LOCK_MEM: ++ if (!tp->pci_fn) ++ bit = APE_LOCK_GRANT_DRIVER; ++ else ++ bit = 1 << tp->pci_fn; ++ break; ++ case TG3_APE_LOCK_PHY0: ++ case TG3_APE_LOCK_PHY1: ++ case TG3_APE_LOCK_PHY2: ++ case TG3_APE_LOCK_PHY3: ++ bit = APE_LOCK_GRANT_DRIVER; ++ break; ++ default: ++ return; ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5761) ++ gnt = TG3_APE_LOCK_GRANT; ++ else ++ gnt = TG3_APE_PER_LOCK_GRANT; ++ ++ tg3_ape_write32(tp, gnt + 4 * locknum, bit); ++} ++ ++static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us) ++{ ++ u32 apedata; ++ ++ while (timeout_us) { ++ if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) ++ return -EBUSY; ++ ++ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); ++ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) ++ break; ++ ++ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); ++ ++#if defined(__VMKLNX__) || (LINUX_VERSION_CODE < 0x020627) /* 2.6.39 */ ++ udelay(10); ++#else ++ usleep_range(10, 20); ++#endif ++ timeout_us -= (timeout_us > 10) ? 10 : timeout_us; ++ } ++ ++ return timeout_us ? 
0 : -EBUSY; ++} ++ ++/* ESX needs tg3_ape_scratchpad_read for FW dump for ESX 5.5 and after */ ++#if (IS_ENABLED(CONFIG_HWMON) && !defined(__VMKLNX__)) || \ ++ (defined(__VMKLNX__) && VMWARE_ESX_DDK_VERSION >= 55000) ++static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us) ++{ ++ u32 i, apedata; ++ ++ for (i = 0; i < timeout_us / 10; i++) { ++ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); ++ ++ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) ++ break; ++ ++ udelay(10); ++ } ++ ++ return i == timeout_us / 10; ++} ++ ++static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, ++ u32 len) ++{ ++ int err; ++ u32 i, bufoff, msgoff, maxlen, apedata; ++ ++ if (!tg3_flag(tp, APE_HAS_NCSI)) ++ return 0; ++ ++ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); ++ if (apedata != APE_SEG_SIG_MAGIC) ++ return -ENODEV; ++ ++ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); ++ if (!(apedata & APE_FW_STATUS_READY)) ++ return -EAGAIN; ++ ++ bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) + ++ TG3_APE_SHMEM_BASE; ++ msgoff = bufoff + 2 * sizeof(u32); ++ maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN); ++ ++ while (len) { ++ u32 length; ++ ++ /* Cap xfer sizes to scratchpad limits. */ ++ length = (len > maxlen) ? maxlen : len; ++ len -= length; ++ ++ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); ++ if (!(apedata & APE_FW_STATUS_READY)) ++ return -EAGAIN; ++ ++ /* Wait for up to 1 msec for APE to service previous event. */ ++ err = tg3_ape_event_lock(tp, 1000); ++ if (err) ++ return err; ++ ++ apedata = APE_EVENT_STATUS_DRIVER_EVNT | ++ APE_EVENT_STATUS_SCRTCHPD_READ | ++ APE_EVENT_STATUS_EVENT_PENDING; ++ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata); ++ ++ tg3_ape_write32(tp, bufoff, base_off); ++ tg3_ape_write32(tp, bufoff + sizeof(u32), length); ++ ++ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); ++ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); ++ ++ base_off += length; ++ ++ if (tg3_ape_wait_for_event(tp, 30000)) ++ return -EAGAIN; ++ ++ for (i = 0; length; i += 4, length -= 4) { ++ u32 val = tg3_ape_read32(tp, msgoff + i); ++ memcpy(data, &val, sizeof(u32)); ++ data++; ++ } ++ } ++ ++ return 0; ++} ++#endif ++ ++static int tg3_ape_send_event(struct tg3 *tp, u32 event) ++{ ++ int err; ++ u32 apedata; ++ ++ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); ++ if (apedata != APE_SEG_SIG_MAGIC) ++ return -EAGAIN; ++ ++ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); ++ if (!(apedata & APE_FW_STATUS_READY)) ++ return -EAGAIN; ++ ++ /* Wait for up to 20 millisecond for APE to service previous event. 
*/ ++ err = tg3_ape_event_lock(tp, 20000); ++ if (err) ++ return err; ++ ++ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, ++ event | APE_EVENT_STATUS_EVENT_PENDING); ++ ++ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); ++ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); ++ ++ return 0; ++} ++ ++static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) ++{ ++ u32 event; ++ u32 apedata; ++ ++ if (!tg3_flag(tp, ENABLE_APE)) ++ return; ++ ++ switch (kind) { ++ case RESET_KIND_INIT: ++ tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++); ++ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, ++ APE_HOST_SEG_SIG_MAGIC); ++ tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, ++ APE_HOST_SEG_LEN_MAGIC); ++ apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); ++ tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); ++ tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, ++ APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, ++ TG3_MIN_NUM, ++ TG3_REVISION[0])); ++ tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, ++ APE_HOST_BEHAV_NO_PHYLOCK); ++ tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, ++ TG3_APE_HOST_DRVR_STATE_START); ++ ++ event = APE_EVENT_STATUS_STATE_START; ++ break; ++ case RESET_KIND_SHUTDOWN: ++ if (device_may_wakeup(&tp->pdev->dev) && ++ tg3_flag(tp, WOL_ENABLE)) { ++ tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, ++ TG3_APE_HOST_WOL_SPEED_AUTO); ++ apedata = TG3_APE_HOST_DRVR_STATE_WOL; ++ } else ++ apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD; ++ ++ tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata); ++ ++ event = APE_EVENT_STATUS_STATE_UNLOAD; ++ break; ++ default: ++ return; ++ } ++ ++ event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; ++ ++ tg3_ape_send_event(tp, event); ++} ++ ++static void tg3_disable_ints(struct tg3 *tp) ++{ ++ int i; ++ ++ tw32(TG3PCI_MISC_HOST_CTRL, ++ (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); ++ for (i = 0; i < tp->irq_max; i++) ++ tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001); ++} ++ ++static void tg3_enable_ints(struct tg3 *tp) ++{ ++ int i; ++ ++ tp->irq_sync = 0; ++ wmb(); ++ ++ tw32(TG3PCI_MISC_HOST_CTRL, ++ (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); ++ ++ tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE; ++ for (i = 0; i < tp->irq_cnt; i++) { ++ struct tg3_napi *tnapi = &tp->napi[i]; ++ ++ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); ++ if (tg3_flag(tp, 1SHOT_MSI)) ++ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); ++ ++ tp->coal_now |= tnapi->coal_now; ++ } ++ ++ /* Force an initial interrupt */ ++ if (!tg3_flag(tp, TAGGED_STATUS) && ++#if defined(__VMKLNX__) ++ tp->napi[0].hw_status && ++#endif ++ (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) ++ tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); ++ else ++ tw32(HOSTCC_MODE, tp->coal_now); ++ ++#ifndef TG3_INBOX ++ if (tp->irq_cnt > 1) ++ tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now); ++ else ++ tp->coal_now &= ~(tp->napi[0].coal_now); ++#else ++ tp->coal_now &= ~(tp->napi[0].coal_now); ++#endif ++} ++ ++static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) ++{ ++ struct tg3 *tp = tnapi->tp; ++ struct tg3_hw_status *sblk = tnapi->hw_status; ++ unsigned int work_exists = 0; ++ ++ /* check for phy events */ ++ if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { ++ if (sblk->status & SD_STATUS_LINK_CHG) ++ work_exists = 1; ++ } ++ ++ /* check for TX work to do */ ++ if (sblk->idx[0].tx_consumer != tnapi->tx_cons) ++ work_exists = 1; ++ ++ /* check for RX work to do */ ++ if (tnapi->rx_rcb_prod_idx && ++ 
*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) ++ work_exists = 1; ++ ++ return work_exists; ++} ++ ++/* tg3_int_reenable ++ * similar to tg3_enable_ints, but it accurately determines whether there ++ * is new work pending and can return without flushing the PIO write ++ * which reenables interrupts ++ */ ++static void tg3_int_reenable(struct tg3_napi *tnapi) ++{ ++ struct tg3 *tp = tnapi->tp; ++ ++ tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); ++ mmiowb(); ++ ++ /* When doing tagged status, this work check is unnecessary. ++ * The last_tag we write above tells the chip which piece of ++ * work we've completed. ++ */ ++ if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi)) ++ tw32(HOSTCC_MODE, tp->coalesce_mode | ++ HOSTCC_MODE_ENABLE | tnapi->coal_now); ++} ++ ++static void tg3_switch_clocks(struct tg3 *tp) ++{ ++ u32 clock_ctrl; ++ u32 orig_clock_ctrl; ++ ++ if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS)) ++ return; ++ ++ clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); ++ ++ orig_clock_ctrl = clock_ctrl; ++ clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN | ++ CLOCK_CTRL_CLKRUN_OENABLE | ++ 0x1f); ++ tp->pci_clock_ctrl = clock_ctrl; ++ ++ if (tg3_flag(tp, 5705_PLUS)) { ++ if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) { ++ tw32_wait_f(TG3PCI_CLOCK_CTRL, ++ clock_ctrl | CLOCK_CTRL_625_CORE, 40); ++ } ++ } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) { ++ tw32_wait_f(TG3PCI_CLOCK_CTRL, ++ clock_ctrl | ++ (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK), ++ 40); ++ tw32_wait_f(TG3PCI_CLOCK_CTRL, ++ clock_ctrl | (CLOCK_CTRL_ALTCLK), ++ 40); ++ } ++ tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40); ++} ++ ++#define PHY_BUSY_LOOPS 5000 ++ ++static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg, ++ u32 *val) ++{ ++ u32 frame_val; ++ unsigned int loops; ++ int ret; ++ ++ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { ++ tw32_f(MAC_MI_MODE, ++ (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); ++ udelay(80); ++ } ++ ++ tg3_ape_lock(tp, tp->phy_ape_lock); ++ ++ *val = 0x0; ++ ++ frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) & ++ MI_COM_PHY_ADDR_MASK); ++ frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & ++ MI_COM_REG_ADDR_MASK); ++ frame_val |= (MI_COM_CMD_READ | MI_COM_START); ++ ++ tw32_f(MAC_MI_COM, frame_val); ++ ++ loops = PHY_BUSY_LOOPS; ++ while (loops != 0) { ++ udelay(10); ++ frame_val = tr32(MAC_MI_COM); ++ ++ if ((frame_val & MI_COM_BUSY) == 0) { ++ udelay(5); ++ frame_val = tr32(MAC_MI_COM); ++ break; ++ } ++ loops -= 1; ++ } ++ ++ ret = -EBUSY; ++ if (loops != 0) { ++ *val = frame_val & MI_COM_DATA_MASK; ++ ret = 0; ++ } ++ ++ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { ++ tw32_f(MAC_MI_MODE, tp->mi_mode); ++ udelay(80); ++ } ++ ++ tg3_ape_unlock(tp, tp->phy_ape_lock); ++ ++ return ret; ++} ++ ++static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) ++{ ++ return __tg3_readphy(tp, tp->phy_addr, reg, val); ++} ++ ++static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg, ++ u32 val) ++{ ++ u32 frame_val; ++ unsigned int loops; ++ int ret; ++ ++ if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && ++ (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL)) ++ return 0; ++ ++ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { ++ tw32_f(MAC_MI_MODE, ++ (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); ++ udelay(80); ++ } ++ ++ tg3_ape_lock(tp, tp->phy_ape_lock); ++ ++ frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) & ++ MI_COM_PHY_ADDR_MASK); ++ frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & ++ MI_COM_REG_ADDR_MASK); ++ frame_val |= (val & MI_COM_DATA_MASK); ++ frame_val 
|= (MI_COM_CMD_WRITE | MI_COM_START); ++ ++ tw32_f(MAC_MI_COM, frame_val); ++ ++ loops = PHY_BUSY_LOOPS; ++ while (loops != 0) { ++ udelay(10); ++ frame_val = tr32(MAC_MI_COM); ++ if ((frame_val & MI_COM_BUSY) == 0) { ++ udelay(5); ++ frame_val = tr32(MAC_MI_COM); ++ break; ++ } ++ loops -= 1; ++ } ++ ++ ret = -EBUSY; ++ if (loops != 0) ++ ret = 0; ++ ++ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { ++ tw32_f(MAC_MI_MODE, tp->mi_mode); ++ udelay(80); ++ } ++ ++ tg3_ape_unlock(tp, tp->phy_ape_lock); ++ ++ return ret; ++} ++ ++static int tg3_writephy(struct tg3 *tp, int reg, u32 val) ++{ ++ return __tg3_writephy(tp, tp->phy_addr, reg, val); ++} ++ ++static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val) ++{ ++ int err; ++ ++ err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); ++ if (err) ++ goto done; ++ ++ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); ++ if (err) ++ goto done; ++ ++ err = tg3_writephy(tp, MII_TG3_MMD_CTRL, ++ MII_TG3_MMD_CTRL_DATA_NOINC | devad); ++ if (err) ++ goto done; ++ ++ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val); ++ ++done: ++ return err; ++} ++ ++static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val) ++{ ++ int err; ++ ++ err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); ++ if (err) ++ goto done; ++ ++ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); ++ if (err) ++ goto done; ++ ++ err = tg3_writephy(tp, MII_TG3_MMD_CTRL, ++ MII_TG3_MMD_CTRL_DATA_NOINC | devad); ++ if (err) ++ goto done; ++ ++ err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val); ++ ++done: ++ return err; ++} ++ ++static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val) ++{ ++ int err; ++ ++ err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); ++ if (!err) ++ err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val); ++ ++ return err; ++} ++ ++static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) ++{ ++ int err; ++ ++ err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); ++ if (!err) ++ err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); ++ ++ return err; ++} ++ ++static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val) ++{ ++ int err; ++ ++ err = tg3_writephy(tp, MII_TG3_AUX_CTRL, ++ (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) | ++ MII_TG3_AUXCTL_SHDWSEL_MISC); ++ if (!err) ++ err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val); ++ ++ return err; ++} ++ ++static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set) ++{ ++ if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC) ++ set |= MII_TG3_AUXCTL_MISC_WREN; ++ ++ return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); ++} ++ ++static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable) ++{ ++ u32 val; ++ int err; ++ ++ err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); ++ ++ if (err) ++ return err; ++ ++ if (enable) ++ val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA; ++ else ++ val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA; ++ ++ err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, ++ val | MII_TG3_AUXCTL_ACTL_TX_6DB); ++ ++ return err; ++} ++ ++static int tg3_phy_shdw_read(struct tg3 *tp, int reg, u32 *val) ++{ ++ int err; ++ ++ err = tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); ++ if (!err) ++ err = tg3_readphy(tp, MII_TG3_MISC_SHDW, val); ++ ++ return err; ++} ++ ++static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val) ++{ ++ return tg3_writephy(tp, MII_TG3_MISC_SHDW, ++ reg | val | MII_TG3_MISC_SHDW_WREN); ++} ++ ++static int tg3_bmcr_reset(struct tg3 *tp) ++{ ++ u32 phy_control; ++ int limit, err; ++ ++ /* OK, reset it, and poll the BMCR_RESET bit until it ++ * clears or we time out. 
++ */ ++ phy_control = BMCR_RESET; ++ err = tg3_writephy(tp, MII_BMCR, phy_control); ++ if (err != 0) ++ return -EBUSY; ++ ++ limit = 5000; ++ while (limit--) { ++ err = tg3_readphy(tp, MII_BMCR, &phy_control); ++ if (err != 0) ++ return -EBUSY; ++ ++ if ((phy_control & BMCR_RESET) == 0) { ++ udelay(40); ++ break; ++ } ++ udelay(10); ++ } ++ if (limit < 0) ++ return -EBUSY; ++ ++ return 0; ++} ++ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) ++{ ++ struct tg3 *tp = bp->priv; ++ u32 val; ++ ++ spin_lock_bh(&tp->lock); ++ ++ if (__tg3_readphy(tp, mii_id, reg, &val)) ++ val = -EIO; ++ ++ spin_unlock_bh(&tp->lock); ++ ++ return val; ++} ++ ++static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) ++{ ++ struct tg3 *tp = bp->priv; ++ u32 ret = 0; ++ ++ spin_lock_bh(&tp->lock); ++ ++ if (__tg3_writephy(tp, mii_id, reg, val)) ++ ret = -EIO; ++ ++ spin_unlock_bh(&tp->lock); ++ ++ return ret; ++} ++ ++static int tg3_mdio_reset(struct mii_bus *bp) ++{ ++ return 0; ++} ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++ ++static void tg3_mdio_config_5785(struct tg3 *tp) ++{ ++ u32 val; ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ struct phy_device *phydev; ++ ++ phydev = tp->mdio_bus->phy_map[tp->phy_addr]; ++ switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { ++ case PHY_ID_BCM50610: ++ case PHY_ID_BCM50610M: ++ case PHY_ID_BCM50612E: ++ val = MAC_PHYCFG2_50610_LED_MODES; ++ break; ++ case PHY_ID_BCMAC131: ++ val = MAC_PHYCFG2_AC131_LED_MODES; ++ break; ++ case PHY_ID_RTL8211C: ++ val = MAC_PHYCFG2_RTL8211C_LED_MODES; ++ break; ++ case PHY_ID_RTL8201E: ++ val = MAC_PHYCFG2_RTL8201E_LED_MODES; ++ break; ++ default: ++ return; ++ } ++ ++ if (phydev->interface != PHY_INTERFACE_MODE_RGMII) { ++ tw32(MAC_PHYCFG2, val); ++ ++ val = tr32(MAC_PHYCFG1); ++ val &= ~(MAC_PHYCFG1_RGMII_INT | ++ MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); ++ val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; ++ tw32(MAC_PHYCFG1, val); ++ ++ return; ++ } ++#else ++ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCMAC131) { ++ tw32(MAC_PHYCFG2, MAC_PHYCFG2_AC131_LED_MODES); ++ ++ val = tr32(MAC_PHYCFG1); ++ val &= ~(MAC_PHYCFG1_RGMII_INT | ++ MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); ++ val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; ++ tw32(MAC_PHYCFG1, val); ++ ++ return; ++ } ++ ++ val = MAC_PHYCFG2_50610_LED_MODES; ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++ ++ if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) ++ val |= MAC_PHYCFG2_EMODE_MASK_MASK | ++ MAC_PHYCFG2_FMODE_MASK_MASK | ++ MAC_PHYCFG2_GMODE_MASK_MASK | ++ MAC_PHYCFG2_ACT_MASK_MASK | ++ MAC_PHYCFG2_QUAL_MASK_MASK | ++ MAC_PHYCFG2_INBAND_ENABLE; ++ ++ tw32(MAC_PHYCFG2, val); ++ ++ val = tr32(MAC_PHYCFG1); ++ val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | ++ MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); ++ if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { ++ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) ++ val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; ++ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) ++ val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; ++ } ++ val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | ++ MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV; ++ tw32(MAC_PHYCFG1, val); ++ ++ val = tr32(MAC_EXT_RGMII_MODE); ++ val &= ~(MAC_RGMII_MODE_RX_INT_B | ++ MAC_RGMII_MODE_RX_QUALITY | ++ MAC_RGMII_MODE_RX_ACTIVITY | ++ MAC_RGMII_MODE_RX_ENG_DET | ++ MAC_RGMII_MODE_TX_ENABLE | ++ MAC_RGMII_MODE_TX_LOWPWR | ++ MAC_RGMII_MODE_TX_RESET); ++ if 
(!tg3_flag(tp, RGMII_INBAND_DISABLE)) { ++ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) ++ val |= MAC_RGMII_MODE_RX_INT_B | ++ MAC_RGMII_MODE_RX_QUALITY | ++ MAC_RGMII_MODE_RX_ACTIVITY | ++ MAC_RGMII_MODE_RX_ENG_DET; ++ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) ++ val |= MAC_RGMII_MODE_TX_ENABLE | ++ MAC_RGMII_MODE_TX_LOWPWR | ++ MAC_RGMII_MODE_TX_RESET; ++ } ++ tw32(MAC_EXT_RGMII_MODE, val); ++} ++ ++static void tg3_mdio_start(struct tg3 *tp) ++{ ++ tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; ++ tw32_f(MAC_MI_MODE, tp->mi_mode); ++ udelay(80); ++ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_flag(tp, MDIOBUS_INITED) && ++ tg3_asic_rev(tp) == ASIC_REV_5785) ++ tg3_mdio_config_5785(tp); ++#else ++ if (tg3_asic_rev(tp) != ASIC_REV_5785) ++ return; ++ ++ tg3_mdio_config_5785(tp); ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { ++ u32 val; ++ ++ /* FIXME -- This shouldn't be required, but without ++ * it, the device will not pass traffic until ++ * the phy is reset via a link up event or ++ * through a change in speed settings. ++ */ ++ tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); ++ if (tg3_flag(tp, RGMII_INBAND_DISABLE)) ++ val |= MII_TG3_AUXCTL_MISC_RGMII_OOBSC; ++ else ++ val &= ~MII_TG3_AUXCTL_MISC_RGMII_OOBSC; ++ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, val); ++ } ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++} ++ ++static int tg3_mdio_init(struct tg3 *tp) ++{ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ int i; ++ u32 reg; ++ struct phy_device *phydev; ++#endif ++ ++ if (tg3_flag(tp, 5717_PLUS)) { ++ u32 is_serdes; ++ ++ tp->phy_addr = tp->pci_fn + 1; ++ ++ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ++ is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; ++ else ++ is_serdes = tr32(TG3_CPMU_PHY_STRAP) & ++ TG3_CPMU_PHY_STRAP_IS_SERDES; ++ if (is_serdes) ++ tp->phy_addr += 7; ++ } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) { ++ int addr; ++ ++ addr = ssb_gige_get_phyaddr(tp->pdev); ++ if (addr < 0) ++ return addr; ++ tp->phy_addr = addr; ++ } else ++ tp->phy_addr = TG3_PHY_MII_ADDR; ++ ++ tg3_mdio_start(tp); ++ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED)) ++ return 0; ++ ++ tp->mdio_bus = mdiobus_alloc(); ++ if (tp->mdio_bus == NULL) ++ return -ENOMEM; ++ ++ tp->mdio_bus->name = "tg3 mdio bus"; ++#ifdef MII_BUS_ID_SIZE ++ snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", ++ (tp->pdev->bus->number << 8) | tp->pdev->devfn); ++#else ++ tp->mdio_bus->id = tp->pdev->devfn; ++#endif ++ tp->mdio_bus->priv = tp; ++#ifdef BCM_MDIOBUS_HAS_PARENT ++ tp->mdio_bus->parent = &tp->pdev->dev; ++#endif ++ tp->mdio_bus->read = &tg3_mdio_read; ++ tp->mdio_bus->write = &tg3_mdio_write; ++ tp->mdio_bus->reset = &tg3_mdio_reset; ++ tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr); ++ tp->mdio_bus->irq = &tp->mdio_irq[0]; ++ ++ for (i = 0; i < PHY_MAX_ADDR; i++) ++ tp->mdio_bus->irq[i] = PHY_POLL; ++ ++ /* The bus registration will look for all the PHYs on the mdio bus. ++ * Unfortunately, it does not ensure the PHY is powered up before ++ * accessing the PHY ID registers. A chip reset is the ++ * quickest way to bring the device back to an operational state.. 
++ */ ++ if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN)) ++ tg3_bmcr_reset(tp); ++ ++ i = mdiobus_register(tp->mdio_bus); ++ if (i) { ++ dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i); ++ mdiobus_free(tp->mdio_bus); ++ return i; ++ } ++ ++ phydev = tp->mdio_bus->phy_map[tp->phy_addr]; ++ ++ if (!phydev || !phydev->drv) { ++ dev_warn(&tp->pdev->dev, "No PHY devices\n"); ++ mdiobus_unregister(tp->mdio_bus); ++ mdiobus_free(tp->mdio_bus); ++ return -ENODEV; ++ } ++ ++ switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { ++ case PHY_ID_BCM57780: ++ phydev->interface = PHY_INTERFACE_MODE_GMII; ++ phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; ++ break; ++ case PHY_ID_BCM50610: ++ case PHY_ID_BCM50610M: ++ case PHY_ID_BCM50612E: ++ phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE | ++ PHY_BRCM_RX_REFCLK_UNUSED | ++ PHY_BRCM_DIS_TXCRXC_NOENRGY | ++ PHY_BRCM_AUTO_PWRDWN_ENABLE; ++ if (tg3_flag(tp, RGMII_INBAND_DISABLE)) ++ phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; ++ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) ++ phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; ++ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) ++ phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; ++ /* fallthru */ ++ case PHY_ID_RTL8211C: ++ phydev->interface = PHY_INTERFACE_MODE_RGMII; ++ break; ++ case PHY_ID_RTL8201E: ++ case PHY_ID_BCMAC131: ++ phydev->interface = PHY_INTERFACE_MODE_MII; ++ phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; ++ tp->phy_flags |= TG3_PHYFLG_IS_FET; ++ break; ++ } ++ ++ tg3_flag_set(tp, MDIOBUS_INITED); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5785) ++ tg3_mdio_config_5785(tp); ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++ ++ return 0; ++} ++ ++static void tg3_mdio_fini(struct tg3 *tp) ++{ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_flag(tp, MDIOBUS_INITED)) { ++ tg3_flag_clear(tp, MDIOBUS_INITED); ++ mdiobus_unregister(tp->mdio_bus); ++ mdiobus_free(tp->mdio_bus); ++ } ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++} ++ ++/* tp->lock is held. */ ++static inline void tg3_generate_fw_event(struct tg3 *tp) ++{ ++ u32 val; ++ ++ val = tr32(GRC_RX_CPU_EVENT); ++ val |= GRC_RX_CPU_DRIVER_EVENT; ++ tw32_f(GRC_RX_CPU_EVENT, val); ++ ++ tp->last_event_jiffies = jiffies; ++} ++ ++#define TG3_FW_EVENT_TIMEOUT_USEC 2500 ++ ++/* tp->lock is held. */ ++static void tg3_wait_for_event_ack(struct tg3 *tp) ++{ ++ int i; ++ unsigned int delay_cnt; ++ long time_remain; ++ ++ /* If enough time has passed, no wait is necessary. */ ++ time_remain = (long)(tp->last_event_jiffies + 1 + ++ usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) - ++ (long)jiffies; ++ if (time_remain < 0) ++ return; ++ ++ /* Check if we can shorten the wait time. */ ++ delay_cnt = jiffies_to_usecs(time_remain); ++ if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC) ++ delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC; ++ delay_cnt = (delay_cnt >> 3) + 1; ++ ++ for (i = 0; i < delay_cnt; i++) { ++ if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) ++ break; ++ if (pci_channel_offline(tp->pdev)) ++ break; ++ ++ udelay(8); ++ } ++} ++ ++/* tp->lock is held. 
*/ ++static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data) ++{ ++ u32 reg, val; ++ ++ val = 0; ++ if (!tg3_readphy(tp, MII_BMCR, ®)) ++ val = reg << 16; ++ if (!tg3_readphy(tp, MII_BMSR, ®)) ++ val |= (reg & 0xffff); ++ *data++ = val; ++ ++ val = 0; ++ if (!tg3_readphy(tp, MII_ADVERTISE, ®)) ++ val = reg << 16; ++ if (!tg3_readphy(tp, MII_LPA, ®)) ++ val |= (reg & 0xffff); ++ *data++ = val; ++ ++ val = 0; ++ if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) { ++ if (!tg3_readphy(tp, MII_CTRL1000, ®)) ++ val = reg << 16; ++ if (!tg3_readphy(tp, MII_STAT1000, ®)) ++ val |= (reg & 0xffff); ++ } ++ *data++ = val; ++ ++ if (!tg3_readphy(tp, MII_PHYADDR, ®)) ++ val = reg << 16; ++ else ++ val = 0; ++ *data++ = val; ++} ++ ++/* tp->lock is held. */ ++static void tg3_ump_link_report(struct tg3 *tp) ++{ ++ u32 data[4]; ++ ++ if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF)) ++ return; ++ ++ tg3_phy_gather_ump_data(tp, data); ++ ++ tg3_wait_for_event_ack(tp); ++ ++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE); ++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14); ++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]); ++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]); ++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]); ++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]); ++ ++ tg3_generate_fw_event(tp); ++} ++ ++/* tp->lock is held. */ ++static void tg3_stop_fw(struct tg3 *tp) ++{ ++ if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { ++ /* Wait for RX cpu to ACK the previous event. */ ++ tg3_wait_for_event_ack(tp); ++ ++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); ++ ++ tg3_generate_fw_event(tp); ++ ++ /* Wait for RX cpu to ACK this event. */ ++ tg3_wait_for_event_ack(tp); ++ } ++} ++ ++/* tp->lock is held. */ ++static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) ++{ ++ tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, ++ NIC_SRAM_FIRMWARE_MBOX_MAGIC1); ++ ++ if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { ++ switch (kind) { ++ case RESET_KIND_INIT: ++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, ++ DRV_STATE_START); ++ break; ++ ++ case RESET_KIND_SHUTDOWN: ++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, ++ DRV_STATE_UNLOAD); ++ break; ++ ++ case RESET_KIND_SUSPEND: ++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, ++ DRV_STATE_SUSPEND); ++ break; ++ ++ default: ++ break; ++ } ++ } ++} ++ ++/* tp->lock is held. */ ++static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) ++{ ++ if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { ++ switch (kind) { ++ case RESET_KIND_INIT: ++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, ++ DRV_STATE_START_DONE); ++ break; ++ ++ case RESET_KIND_SHUTDOWN: ++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, ++ DRV_STATE_UNLOAD_DONE); ++ break; ++ ++ default: ++ break; ++ } ++ } ++} ++ ++/* tp->lock is held. 
*/ ++static void tg3_write_sig_legacy(struct tg3 *tp, int kind) ++{ ++ if (tg3_flag(tp, ENABLE_ASF)) { ++ switch (kind) { ++ case RESET_KIND_INIT: ++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, ++ DRV_STATE_START); ++ break; ++ ++ case RESET_KIND_SHUTDOWN: ++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, ++ DRV_STATE_UNLOAD); ++ break; ++ ++ case RESET_KIND_SUSPEND: ++ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, ++ DRV_STATE_SUSPEND); ++ break; ++ ++ default: ++ break; ++ } ++ } ++} ++ ++static int tg3_poll_fw(struct tg3 *tp) ++{ ++ int i; ++ u32 val; ++ int fw_timeout = 350000; ++ ++ if (tg3_flag(tp, NO_FWARE_REPORTED)) ++ return 0; ++ ++ if (tg3_flag(tp, IS_SSB_CORE)) { ++ /* We don't use firmware. */ ++ return 0; ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5906) { ++ /* Wait up to 20ms for init done. */ ++ for (i = 0; i < 200; i++) { ++ if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) ++ return 0; ++ if (pci_channel_offline(tp->pdev)) ++ return -ENODEV; ++ ++ udelay(100); ++ } ++ return -ENODEV; ++ } ++ ++ /* Wait for firmware initialization to complete. */ ++ for (i = 0; i < fw_timeout; i++) { ++ tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); ++ if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) ++ break; ++ if (pci_channel_offline(tp->pdev)) { ++ if (!tg3_flag(tp, NO_FWARE_REPORTED)) { ++ tg3_flag_set(tp, NO_FWARE_REPORTED); ++ netdev_info(tp->dev, "No firmware running\n"); ++ } ++ ++ break; ++ } ++ ++ udelay(10); ++ } ++ ++ /* Chip might not be fitted with firmware. Some Sun onboard ++ * parts are configured like that. So don't signal the timeout ++ * of the above loop as an error, but do report the lack of ++ * running firmware once. ++ */ ++ if (i >= fw_timeout && !tg3_flag(tp, NO_FWARE_REPORTED)) { ++ tg3_flag_set(tp, NO_FWARE_REPORTED); ++ ++ netdev_info(tp->dev, "No firmware running\n"); ++ } ++ ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) { ++ /* The 57765 A0 needs a little more ++ * time to do some important work. ++ */ ++ mdelay(10); ++ } ++ ++ return 0; ++} ++ ++static void tg3_link_report(struct tg3 *tp) ++{ ++ if (!netif_carrier_ok(tp->dev)) { ++ netif_info(tp, link, tp->dev, "Link is down\n"); ++ tg3_ump_link_report(tp); ++ } else if (netif_msg_link(tp)) { ++ netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n", ++ (tp->link_config.active_speed == SPEED_1000 ? ++ 1000 : ++ (tp->link_config.active_speed == SPEED_100 ? ++ 100 : 10)), ++ (tp->link_config.active_duplex == DUPLEX_FULL ? ++ "full" : "half")); ++ ++ netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n", ++ (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? ++ "on" : "off", ++ (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? ++ "on" : "off"); ++ ++ if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) ++ netdev_info(tp->dev, "EEE is %s\n", ++ tp->setlpicnt ? 
"enabled" : "disabled"); ++ ++ tg3_ump_link_report(tp); ++ } ++ ++ tp->link_up = netif_carrier_ok(tp->dev); ++} ++ ++static u32 tg3_decode_flowctrl_1000T(u32 adv) ++{ ++ u32 flowctrl = 0; ++ ++ if (adv & ADVERTISE_PAUSE_CAP) { ++ flowctrl |= FLOW_CTRL_RX; ++ if (!(adv & ADVERTISE_PAUSE_ASYM)) ++ flowctrl |= FLOW_CTRL_TX; ++ } else if (adv & ADVERTISE_PAUSE_ASYM) ++ flowctrl |= FLOW_CTRL_TX; ++ ++ return flowctrl; ++} ++ ++static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) ++{ ++ u16 miireg; ++ ++ if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) ++ miireg = ADVERTISE_1000XPAUSE; ++ else if (flow_ctrl & FLOW_CTRL_TX) ++ miireg = ADVERTISE_1000XPSE_ASYM; ++ else if (flow_ctrl & FLOW_CTRL_RX) ++ miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; ++ else ++ miireg = 0; ++ ++ return miireg; ++} ++ ++static u32 tg3_decode_flowctrl_1000X(u32 adv) ++{ ++ u32 flowctrl = 0; ++ ++ if (adv & ADVERTISE_1000XPAUSE) { ++ flowctrl |= FLOW_CTRL_RX; ++ if (!(adv & ADVERTISE_1000XPSE_ASYM)) ++ flowctrl |= FLOW_CTRL_TX; ++ } else if (adv & ADVERTISE_1000XPSE_ASYM) ++ flowctrl |= FLOW_CTRL_TX; ++ ++ return flowctrl; ++} ++ ++static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) ++{ ++ u8 cap = 0; ++ ++ if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { ++ cap = FLOW_CTRL_TX | FLOW_CTRL_RX; ++ } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { ++ if (lcladv & ADVERTISE_1000XPAUSE) ++ cap = FLOW_CTRL_RX; ++ if (rmtadv & ADVERTISE_1000XPAUSE) ++ cap = FLOW_CTRL_TX; ++ } ++ ++ return cap; ++} ++ ++static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) ++{ ++ u8 autoneg; ++ u8 flowctrl = 0; ++ u32 old_rx_mode = tp->rx_mode; ++ u32 old_tx_mode = tp->tx_mode; ++ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_flag(tp, USE_PHYLIB)) ++ autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg; ++ else ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++ autoneg = tp->link_config.autoneg; ++ ++ if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { ++ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) ++ flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); ++ else ++ flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); ++ } else ++ flowctrl = tp->link_config.flowctrl; ++ ++ tp->link_config.active_flowctrl = flowctrl; ++ ++ if (flowctrl & FLOW_CTRL_RX) ++ tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; ++ else ++ tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; ++ ++ if (old_rx_mode != tp->rx_mode) ++ tw32_f(MAC_RX_MODE, tp->rx_mode); ++ ++ if (flowctrl & FLOW_CTRL_TX) ++ tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; ++ else ++ tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; ++ ++ if (old_tx_mode != tp->tx_mode) ++ tw32_f(MAC_TX_MODE, tp->tx_mode); ++} ++ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++static void tg3_adjust_link(struct net_device *dev) ++{ ++ u8 oldflowctrl, linkmesg = 0; ++ u32 mac_mode, lcl_adv, rmt_adv; ++ struct tg3 *tp = netdev_priv(dev); ++ struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr]; ++ ++ spin_lock_bh(&tp->lock); ++ ++ mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | ++ MAC_MODE_HALF_DUPLEX); ++ ++ oldflowctrl = tp->link_config.active_flowctrl; ++ ++ if (phydev->link) { ++ lcl_adv = 0; ++ rmt_adv = 0; ++ ++ if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) ++ mac_mode |= MAC_MODE_PORT_MODE_MII; ++ else if (phydev->speed == SPEED_1000 || ++ tg3_asic_rev(tp) != ASIC_REV_5785) ++ mac_mode |= MAC_MODE_PORT_MODE_GMII; ++ else ++ mac_mode |= MAC_MODE_PORT_MODE_MII; ++ ++ if (phydev->duplex == DUPLEX_HALF) ++ mac_mode |= MAC_MODE_HALF_DUPLEX; ++ else { ++ lcl_adv = 
mii_advertise_flowctrl( ++ tp->link_config.flowctrl); ++ ++ if (phydev->pause) ++ rmt_adv = LPA_PAUSE_CAP; ++ if (phydev->asym_pause) ++ rmt_adv |= LPA_PAUSE_ASYM; ++ } ++ ++ tg3_setup_flow_control(tp, lcl_adv, rmt_adv); ++ } else ++ mac_mode |= MAC_MODE_PORT_MODE_GMII; ++ ++ if (mac_mode != tp->mac_mode) { ++ tp->mac_mode = mac_mode; ++ tw32_f(MAC_MODE, tp->mac_mode); ++ udelay(40); ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5785) { ++ if (phydev->speed == SPEED_10) ++ tw32(MAC_MI_STAT, ++ MAC_MI_STAT_10MBPS_MODE | ++ MAC_MI_STAT_LNKSTAT_ATTN_ENAB); ++ else ++ tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); ++ } ++ ++ if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) ++ tw32(MAC_TX_LENGTHS, ++ ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | ++ (6 << TX_LENGTHS_IPG_SHIFT) | ++ (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); ++ else ++ tw32(MAC_TX_LENGTHS, ++ ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | ++ (6 << TX_LENGTHS_IPG_SHIFT) | ++ (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); ++ ++ if (phydev->link != tp->old_link || ++ phydev->speed != tp->link_config.active_speed || ++ phydev->duplex != tp->link_config.active_duplex || ++ oldflowctrl != tp->link_config.active_flowctrl) ++ linkmesg = 1; ++ ++ tp->old_link = phydev->link; ++ tp->link_config.active_speed = phydev->speed; ++ tp->link_config.active_duplex = phydev->duplex; ++ ++ spin_unlock_bh(&tp->lock); ++ ++ if (linkmesg) ++ tg3_link_report(tp); ++} ++ ++static int tg3_phy_init(struct tg3 *tp) ++{ ++ struct phy_device *phydev; ++ ++ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) ++ return 0; ++ ++ /* Bring the PHY back to a known state. */ ++ tg3_bmcr_reset(tp); ++ ++ phydev = tp->mdio_bus->phy_map[tp->phy_addr]; ++ ++ /* Attach the MAC to the PHY. */ ++ phydev = phy_connect(tp->dev, dev_name(&phydev->dev), ++ tg3_adjust_link, phydev->interface); ++ if (IS_ERR(phydev)) { ++ dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); ++ return PTR_ERR(phydev); ++ } ++ ++ /* Mask with MAC supported features. 
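++	 * Summary of the switch below, added for clarity: GMII and
++	 * RGMII attachments keep PHY_GBIT_FEATURES unless the
++	 * 10/100-only PHY flag is set; plain MII is limited to
++	 * PHY_BASIC_FEATURES. Pause and Asym_Pause are preserved in
++	 * all cases so flow-control autonegotiation still works.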
*/ ++ switch (phydev->interface) { ++ case PHY_INTERFACE_MODE_GMII: ++ case PHY_INTERFACE_MODE_RGMII: ++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { ++ phydev->supported &= (PHY_GBIT_FEATURES | ++ SUPPORTED_Pause | ++ SUPPORTED_Asym_Pause); ++ break; ++ } ++ /* fallthru */ ++ case PHY_INTERFACE_MODE_MII: ++ phydev->supported &= (PHY_BASIC_FEATURES | ++ SUPPORTED_Pause | ++ SUPPORTED_Asym_Pause); ++ break; ++ default: ++ phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]); ++ return -EINVAL; ++ } ++ ++ tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; ++ ++ phydev->advertising = phydev->supported; ++ ++ return 0; ++} ++ ++static void tg3_phy_start(struct tg3 *tp) ++{ ++ struct phy_device *phydev; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) ++ return; ++ ++ phydev = tp->mdio_bus->phy_map[tp->phy_addr]; ++ ++ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { ++ tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; ++ phydev->speed = tp->link_config.speed; ++ phydev->duplex = tp->link_config.duplex; ++ phydev->autoneg = tp->link_config.autoneg; ++ phydev->advertising = tp->link_config.advertising; ++ } ++ ++ phy_start(phydev); ++ ++ phy_start_aneg(phydev); ++} ++ ++static void tg3_phy_stop(struct tg3 *tp) ++{ ++ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) ++ return; ++ ++ phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]); ++} ++ ++static void tg3_phy_fini(struct tg3 *tp) ++{ ++ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { ++ phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]); ++ tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; ++ } ++} ++#else ++#define tg3_phy_init(tp) 0 ++#define tg3_phy_start(tp) ++#define tg3_phy_stop(tp) ++#define tg3_phy_fini(tp) ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++ ++static int tg3_phy_set_extloopbk(struct tg3 *tp) ++{ ++ int err; ++ u32 val; ++ ++ if (tp->phy_flags & TG3_PHYFLG_IS_FET) ++ return 0; ++ ++ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { ++ /* Cannot do read-modify-write on 5401 */ ++ err = tg3_phy_auxctl_write(tp, ++ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, ++ MII_TG3_AUXCTL_ACTL_EXTLOOPBK | ++ 0x4c20); ++ goto done; ++ } ++ ++ err = tg3_phy_auxctl_read(tp, ++ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); ++ if (err) ++ return err; ++ ++ val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; ++ err = tg3_phy_auxctl_write(tp, ++ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); ++ ++done: ++ return err; ++} ++ ++static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) ++{ ++ u32 phytest; ++ ++ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { ++ u32 phy; ++ ++ tg3_writephy(tp, MII_TG3_FET_TEST, ++ phytest | MII_TG3_FET_SHADOW_EN); ++ if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { ++ if (enable) ++ phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; ++ else ++ phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; ++ tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); ++ } ++ tg3_writephy(tp, MII_TG3_FET_TEST, phytest); ++ } ++} ++ ++static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) ++{ ++ u32 reg; ++ ++ if (!tg3_flag(tp, 5705_PLUS) || ++ (tg3_flag(tp, 5717_PLUS) && ++ (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) ++ return; ++ ++ if (tp->phy_flags & TG3_PHYFLG_IS_FET) { ++ tg3_phy_fet_toggle_apd(tp, enable); ++ return; ++ } ++ ++#ifndef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_asic_rev(tp) == ASIC_REV_5785) { ++ reg = MII_TG3_MISC_SHDW_SCR5_TRDDAPD | ++ MII_TG3_MISC_SHDW_SCR5_LPED | ++ MII_TG3_MISC_SHDW_SCR5_DLPTLM | ++ MII_TG3_MISC_SHDW_SCR5_SDTL; ++ if ((tp->phy_id & ~TG3_PHY_ID_MASK) < 0x3) { ++ reg |= MII_TG3_MISC_SHDW_SCR5_C125OE; ++ if (!enable) ++ reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; ++ } ++ } 
else { ++#endif ++ reg = MII_TG3_MISC_SHDW_SCR5_LPED | ++ MII_TG3_MISC_SHDW_SCR5_DLPTLM | ++ MII_TG3_MISC_SHDW_SCR5_SDTL | ++ MII_TG3_MISC_SHDW_SCR5_C125OE; ++ if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable) ++ reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; ++#ifndef BCM_INCLUDE_PHYLIB_SUPPORT ++ } ++#endif ++ ++ tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); ++ ++ ++ reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS; ++ if (enable) ++ reg |= MII_TG3_MISC_SHDW_APD_ENABLE; ++ ++ tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg); ++} ++ ++static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) ++{ ++ u32 phy; ++ ++ if (!tg3_flag(tp, 5705_PLUS) || ++ (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) ++ return; ++ ++ if (tp->phy_flags & TG3_PHYFLG_IS_FET) { ++ u32 ephy; ++ ++ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { ++ u32 reg = MII_TG3_FET_SHDW_MISCCTRL; ++ ++ tg3_writephy(tp, MII_TG3_FET_TEST, ++ ephy | MII_TG3_FET_SHADOW_EN); ++ if (!tg3_readphy(tp, reg, &phy)) { ++ if (enable) ++ phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; ++ else ++ phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; ++ tg3_writephy(tp, reg, phy); ++ } ++ tg3_writephy(tp, MII_TG3_FET_TEST, ephy); ++ } ++ } else { ++ int ret; ++ ++ ret = tg3_phy_auxctl_read(tp, ++ MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); ++ if (!ret) { ++ if (enable) ++ phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; ++ else ++ phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; ++ tg3_phy_auxctl_write(tp, ++ MII_TG3_AUXCTL_SHDWSEL_MISC, phy); ++ } ++ } ++} ++ ++static void tg3_phy_set_wirespeed(struct tg3 *tp) ++{ ++ int ret; ++ u32 val; ++ ++ if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) ++ return; ++ ++ ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); ++ if (!ret) ++ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, ++ val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); ++} ++ ++static void tg3_phy_apply_otp(struct tg3 *tp) ++{ ++ u32 otp, phy; ++ ++ if (!tp->phy_otp) ++ return; ++ ++ otp = tp->phy_otp; ++ ++ if (tg3_phy_toggle_auxctl_smdsp(tp, true)) ++ return; ++ ++ phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); ++ phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; ++ tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); ++ ++ phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | ++ ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); ++ tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); ++ ++ phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); ++ phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; ++ tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); ++ ++ phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); ++ tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); ++ ++ phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); ++ tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); ++ ++ phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | ++ ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); ++ tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); ++ ++ tg3_phy_toggle_auxctl_smdsp(tp, false); ++} ++ ++static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) ++{ ++ u32 val; ++ struct ethtool_eee *dest = &tp->eee; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) ++ return; ++ ++ if (eee) ++ dest = eee; ++ ++ if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) ++ return; ++ ++ /* Pull eee_active */ ++ if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || ++ val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { ++ dest->eee_active = 1; ++ } else ++ dest->eee_active = 0; ++ ++ /* Pull lp advertised settings */ ++ if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, 
&val)) ++ return; ++ dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); ++ ++ /* Pull advertised and eee_enabled settings */ ++ if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) ++ return; ++ dest->eee_enabled = !!val; ++ dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); ++ ++ /* Pull tx_lpi_enabled */ ++ val = tr32(TG3_CPMU_EEE_MODE); ++ dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); ++ ++ /* Pull lpi timer value */ ++ dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; ++} ++ ++static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) ++{ ++ u32 val; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) ++ return; ++ ++#ifndef BCM_INCLUDE_PHYLIB_SUPPORT ++ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM50612E) ++ return; ++#endif ++ ++ tp->setlpicnt = 0; ++ ++ if (tp->link_config.autoneg == AUTONEG_ENABLE && ++ current_link_up && ++ tp->link_config.active_duplex == DUPLEX_FULL && ++ (tp->link_config.active_speed == SPEED_100 || ++ tp->link_config.active_speed == SPEED_1000)) { ++ u32 eeectl; ++ ++ if (tp->link_config.active_speed == SPEED_1000) ++ eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; ++ else ++ eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; ++ ++ tw32(TG3_CPMU_EEE_CTRL, eeectl); ++ ++ tg3_eee_pull_config(tp, NULL); ++ if (tp->eee.eee_active) ++ tp->setlpicnt = 2; ++ } ++ ++ if (!tp->setlpicnt) { ++ if (current_link_up && ++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) { ++ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); ++ tg3_phy_toggle_auxctl_smdsp(tp, false); ++ } ++ ++ val = tr32(TG3_CPMU_EEE_MODE); ++ tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); ++ } ++} ++ ++static void tg3_phy_eee_enable(struct tg3 *tp) ++{ ++ u32 val; ++ ++ if (tp->link_config.active_speed == SPEED_1000 && ++ (tg3_asic_rev(tp) == ASIC_REV_5717 || ++ tg3_asic_rev(tp) == ASIC_REV_5719 || ++ tg3_flag(tp, 57765_CLASS)) && ++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) { ++ val = MII_TG3_DSP_TAP26_ALNOKO | ++ MII_TG3_DSP_TAP26_RMRXSTO; ++ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); ++ tg3_phy_toggle_auxctl_smdsp(tp, false); ++ } ++ ++ val = tr32(TG3_CPMU_EEE_MODE); ++ tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); ++} ++ ++static int tg3_wait_macro_done(struct tg3 *tp) ++{ ++ int limit = 100; ++ ++ while (limit--) { ++ u32 tmp32; ++ ++ if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { ++ if ((tmp32 & 0x1000) == 0) ++ break; ++ } ++ } ++ if (limit < 0) ++ return -EBUSY; ++ ++ return 0; ++} ++ ++static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) ++{ ++ static const u32 test_pat[4][6] = { ++ { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, ++ { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, ++ { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, ++ { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } ++ }; ++ int chan; ++ ++ for (chan = 0; chan < 4; chan++) { ++ int i; ++ ++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, ++ (chan * 0x2000) | 0x0200); ++ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); ++ ++ for (i = 0; i < 6; i++) ++ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, ++ test_pat[chan][i]); ++ ++ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); ++ if (tg3_wait_macro_done(tp)) { ++ *resetp = 1; ++ return -EBUSY; ++ } ++ ++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, ++ (chan * 0x2000) | 0x0200); ++ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082); ++ if (tg3_wait_macro_done(tp)) { ++ *resetp = 1; ++ return -EBUSY; ++ } ++ ++ tg3_writephy(tp, 
MII_TG3_DSP_CONTROL, 0x0802);
++		if (tg3_wait_macro_done(tp)) {
++			*resetp = 1;
++			return -EBUSY;
++		}
++
++		for (i = 0; i < 6; i += 2) {
++			u32 low, high;
++
++			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
++			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
++			    tg3_wait_macro_done(tp)) {
++				*resetp = 1;
++				return -EBUSY;
++			}
++			low &= 0x7fff;
++			high &= 0x000f;
++			if (low != test_pat[chan][i] ||
++			    high != test_pat[chan][i+1]) {
++				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
++				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
++				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
++
++				return -EBUSY;
++			}
++		}
++	}
++
++	return 0;
++}
++
++static int tg3_phy_reset_chanpat(struct tg3 *tp)
++{
++	int chan;
++
++	for (chan = 0; chan < 4; chan++) {
++		int i;
++
++		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
++			     (chan * 0x2000) | 0x0200);
++		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
++		for (i = 0; i < 6; i++)
++			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
++		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
++		if (tg3_wait_macro_done(tp))
++			return -EBUSY;
++	}
++
++	return 0;
++}
++
++static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
++{
++	u32 reg32, phy9_orig;
++	int retries, do_phy_reset, err;
++
++	retries = 10;
++	do_phy_reset = 1;
++	do {
++		if (do_phy_reset) {
++			err = tg3_bmcr_reset(tp);
++			if (err)
++				return err;
++			do_phy_reset = 0;
++		}
++
++		/* Disable transmitter and interrupt. */
++		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
++			continue;
++
++		reg32 |= 0x3000;
++		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
++
++		/* Set full-duplex, 1000 mbps. */
++		tg3_writephy(tp, MII_BMCR,
++			     BMCR_FULLDPLX | BMCR_SPEED1000);
++
++		/* Set to master mode. */
++		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
++			continue;
++
++		tg3_writephy(tp, MII_CTRL1000,
++			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
++
++		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
++		if (err)
++			return err;
++
++		/* Block the PHY control access. */
++		tg3_phydsp_write(tp, 0x8005, 0x0800);
++
++		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
++		if (!err)
++			break;
++	} while (--retries);
++
++	err = tg3_phy_reset_chanpat(tp);
++	if (err)
++		return err;
++
++	tg3_phydsp_write(tp, 0x8005, 0x0000);
++
++	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
++	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
++
++	tg3_phy_toggle_auxctl_smdsp(tp, false);
++
++	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
++
++	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
++		reg32 &= ~0x3000;
++		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
++	} else if (!err)
++		err = -EBUSY;
++
++	return err;
++}
++
++static void tg3_carrier_off(struct tg3 *tp)
++{
++	netif_carrier_off(tp->dev);
++	tp->link_up = false;
++}
++
++static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
++{
++	if (tg3_flag(tp, ENABLE_ASF))
++		netdev_warn(tp->dev,
++			    "Management side-band traffic will be interrupted during phy settings change\n");
++}
++
++/* Unconditionally reset the tigon3 PHY and re-apply the chip- and
++ * PHY-specific workarounds.
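++ *
++ * Rough sequence, summarized from the code below: BMCR soft reset
++ * (or the 5703/4/5 DSP test-pattern recovery path), OTP and APD
++ * setup, per-PHY DSP workarounds, jumbo-frame FIFO elasticity, and
++ * finally auto-MDIX and wirespeed restore.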
++ */ ++static int tg3_phy_reset(struct tg3 *tp) ++{ ++ u32 val, cpmuctrl; ++ int err; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5906) { ++ val = tr32(GRC_MISC_CFG); ++ tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); ++ udelay(40); ++ } ++ err = tg3_readphy(tp, MII_BMSR, &val); ++ err |= tg3_readphy(tp, MII_BMSR, &val); ++ if (err != 0) ++ return -EBUSY; ++ ++ if (netif_running(tp->dev) && tp->link_up) { ++ netif_carrier_off(tp->dev); ++ tg3_link_report(tp); ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5703 || ++ tg3_asic_rev(tp) == ASIC_REV_5704 || ++ tg3_asic_rev(tp) == ASIC_REV_5705) { ++ err = tg3_phy_reset_5703_4_5(tp); ++ if (err) ++ return err; ++ goto out; ++ } ++ ++ cpmuctrl = 0; ++ if (tg3_asic_rev(tp) == ASIC_REV_5784 && ++ tg3_chip_rev(tp) != CHIPREV_5784_AX) { ++ cpmuctrl = tr32(TG3_CPMU_CTRL); ++ if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) ++ tw32(TG3_CPMU_CTRL, ++ cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); ++ } ++ ++ err = tg3_bmcr_reset(tp); ++ if (err) ++ return err; ++ ++ if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { ++ val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; ++ tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); ++ ++ tw32(TG3_CPMU_CTRL, cpmuctrl); ++ } ++ ++ if (tg3_chip_rev(tp) == CHIPREV_5784_AX || ++ tg3_chip_rev(tp) == CHIPREV_5761_AX) { ++ val = tr32(TG3_CPMU_LSPD_1000MB_CLK); ++ if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == ++ CPMU_LSPD_1000MB_MACCLK_12_5) { ++ val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; ++ udelay(40); ++ tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); ++ } ++ } ++ ++ if (tg3_flag(tp, 5717_PLUS) && ++ (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ++ return 0; ++ ++ tg3_phy_apply_otp(tp); ++ ++ if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) ++ tg3_phy_toggle_apd(tp, true); ++ else ++ tg3_phy_toggle_apd(tp, false); ++ ++out: ++#ifndef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_asic_rev(tp) == ASIC_REV_5785 && ++ (tp->phy_id & TG3_PHY_ID_MASK) != TG3_PHY_ID_BCMAC131) { ++ /* A0 */ ++ if (tp->phy_id == TG3_PHY_ID_BCM50612E && ++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) { ++ tg3_phydsp_write(tp, 0x0fff, 0x4000); ++ tg3_phydsp_write(tp, 0x0021, 0x4600); ++ tg3_phy_toggle_auxctl_smdsp(tp, false); ++ } ++ ++ if (((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM50610 || ++ (tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM50610M) && ++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) { ++ val = MII_TG3_DSP_EXP8_REJ2MHz; ++ tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); ++ ++ /* Apply workaround to A0 revision parts only. */ ++ if (tp->phy_id == TG3_PHY_ID_BCM50610 || ++ tp->phy_id == TG3_PHY_ID_BCM50610M) { ++ tg3_phydsp_write(tp, 0x001F, 0x0300); ++ tg3_phydsp_write(tp, 0x601F, 0x0002); ++ tg3_phydsp_write(tp, 0x0F75, 0x003C); ++ tg3_phydsp_write(tp, 0x0F96, 0x0010); ++ tg3_phydsp_write(tp, 0x0F97, 0x0C0C); ++ } ++ ++ tg3_phy_toggle_auxctl_smdsp(tp, false); ++ } ++ ++ tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); ++ if (tg3_flag(tp, RGMII_INBAND_DISABLE)) ++ val |= MII_TG3_AUXCTL_MISC_RGMII_OOBSC; ++ else ++ val &= ~MII_TG3_AUXCTL_MISC_RGMII_OOBSC; ++ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, val); ++ ++ /* Clear all mode configuration bits. 
*/ ++ if (!tg3_phy_shdw_read(tp, MII_TG3_MISC_SHDW_RGMII_SEL, &val)) { ++ val &= ~(MII_TG3_MISC_SHDW_RGMII_MODESEL0 | ++ MII_TG3_MISC_SHDW_RGMII_MODESEL1); ++ tg3_phy_shdw_write(tp, ++ MII_TG3_MISC_SHDW_RGMII_SEL, val); ++ } ++ } ++ ++ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM57780 && ++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) { ++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, MII_TG3_DSP_EXP75); ++ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &val); ++ val |= MII_TG3_DSP_EXP75_SUP_CM_OSC; ++ tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, val); ++ ++ tg3_phy_toggle_auxctl_smdsp(tp, false); ++ } ++#endif ++ if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && ++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) { ++ tg3_phydsp_write(tp, 0x201f, 0x2aaa); ++ tg3_phydsp_write(tp, 0x000a, 0x0323); ++ tg3_phy_toggle_auxctl_smdsp(tp, false); ++ } ++ ++ if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { ++ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); ++ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); ++ } ++ ++ if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { ++ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { ++ tg3_phydsp_write(tp, 0x000a, 0x310b); ++ tg3_phydsp_write(tp, 0x201f, 0x9506); ++ tg3_phydsp_write(tp, 0x401f, 0x14e2); ++ tg3_phy_toggle_auxctl_smdsp(tp, false); ++ } ++ } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { ++ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { ++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); ++ if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { ++ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); ++ tg3_writephy(tp, MII_TG3_TEST1, ++ MII_TG3_TEST1_TRIM_EN | 0x4); ++ } else ++ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); ++ ++ tg3_phy_toggle_auxctl_smdsp(tp, false); ++ } ++ } ++ ++ /* Set Extended packet length bit (bit 14) on all chips that */ ++ /* support jumbo frames */ ++ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { ++ /* Cannot do read-modify-write on 5401 */ ++ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); ++ } else if (tg3_flag(tp, JUMBO_CAPABLE)) { ++ /* Set bit 14 with read-modify-write to preserve other bits */ ++ err = tg3_phy_auxctl_read(tp, ++ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); ++ if (!err) ++ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, ++ val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); ++ } ++ ++ /* Set phy register 0x10 bit 0 to high fifo elasticity to support ++ * jumbo frames transmission. 
++ */ ++ if (tg3_flag(tp, JUMBO_CAPABLE)) { ++ if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) ++ tg3_writephy(tp, MII_TG3_EXT_CTRL, ++ val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5906) { ++ /* adjust output voltage */ ++ tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); ++ } ++#ifndef BCM_INCLUDE_PHYLIB_SUPPORT ++ else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { ++ u32 brcmtest; ++ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &brcmtest) && ++ !tg3_writephy(tp, MII_TG3_FET_TEST, ++ brcmtest | MII_TG3_FET_SHADOW_EN)) { ++ u32 reg = MII_TG3_FET_SHDW_AUXMODE4; ++ ++ if (!tg3_readphy(tp, reg, &val)) { ++ val &= ~MII_TG3_FET_SHDW_AM4_LED_MASK; ++ val |= MII_TG3_FET_SHDW_AM4_LED_MODE1; ++ tg3_writephy(tp, reg, val); ++ } ++ ++ tg3_writephy(tp, MII_TG3_FET_TEST, brcmtest); ++ } ++ } ++#endif ++ ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0) ++ tg3_phydsp_write(tp, 0xffb, 0x4000); ++ ++ tg3_phy_toggle_automdix(tp, true); ++ tg3_phy_set_wirespeed(tp); ++ return 0; ++} ++ ++#define TG3_GPIO_MSG_DRVR_PRES 0x00000001 ++#define TG3_GPIO_MSG_NEED_VAUX 0x00000002 ++#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ ++ TG3_GPIO_MSG_NEED_VAUX) ++#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ ++ ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ ++ (TG3_GPIO_MSG_DRVR_PRES << 4) | \ ++ (TG3_GPIO_MSG_DRVR_PRES << 8) | \ ++ (TG3_GPIO_MSG_DRVR_PRES << 12)) ++ ++#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ ++ ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ ++ (TG3_GPIO_MSG_NEED_VAUX << 4) | \ ++ (TG3_GPIO_MSG_NEED_VAUX << 8) | \ ++ (TG3_GPIO_MSG_NEED_VAUX << 12)) ++ ++static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) ++{ ++ u32 status, shift; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 || ++ tg3_asic_rev(tp) == ASIC_REV_5719) ++ status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); ++ else ++ status = tr32(TG3_CPMU_DRV_STATUS); ++ ++ shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; ++ status &= ~(TG3_GPIO_MSG_MASK << shift); ++ status |= (newstat << shift); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 || ++ tg3_asic_rev(tp) == ASIC_REV_5719) ++ tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); ++ else ++ tw32(TG3_CPMU_DRV_STATUS, status); ++ ++ return status >> TG3_APE_GPIO_MSG_SHIFT; ++} ++ ++static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) ++{ ++ if (!tg3_flag(tp, IS_NIC)) ++ return 0; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 || ++ tg3_asic_rev(tp) == ASIC_REV_5719 || ++ tg3_asic_rev(tp) == ASIC_REV_5720) { ++ if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) ++ return -EIO; ++ ++ tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); ++ ++ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, ++ TG3_GRC_LCLCTL_PWRSW_DELAY); ++ ++ tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); ++ } else { ++ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, ++ TG3_GRC_LCLCTL_PWRSW_DELAY); ++ } ++ ++ return 0; ++} ++ ++static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) ++{ ++ u32 grc_local_ctrl; ++ ++ if (!tg3_flag(tp, IS_NIC) || ++ tg3_asic_rev(tp) == ASIC_REV_5700 || ++ tg3_asic_rev(tp) == ASIC_REV_5701) ++ return; ++ ++ grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; ++ ++ tw32_wait_f(GRC_LOCAL_CTRL, ++ grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, ++ TG3_GRC_LCLCTL_PWRSW_DELAY); ++ ++ tw32_wait_f(GRC_LOCAL_CTRL, ++ grc_local_ctrl, ++ TG3_GRC_LCLCTL_PWRSW_DELAY); ++ ++ tw32_wait_f(GRC_LOCAL_CTRL, ++ grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, ++ TG3_GRC_LCLCTL_PWRSW_DELAY); ++} ++ ++static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) ++{ ++ if (!tg3_flag(tp, IS_NIC)) ++ return; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5700 || 
++ tg3_asic_rev(tp) == ASIC_REV_5701) { ++ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | ++ (GRC_LCLCTRL_GPIO_OE0 | ++ GRC_LCLCTRL_GPIO_OE1 | ++ GRC_LCLCTRL_GPIO_OE2 | ++ GRC_LCLCTRL_GPIO_OUTPUT0 | ++ GRC_LCLCTRL_GPIO_OUTPUT1), ++ TG3_GRC_LCLCTL_PWRSW_DELAY); ++ } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { ++ /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ ++ u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | ++ GRC_LCLCTRL_GPIO_OE1 | ++ GRC_LCLCTRL_GPIO_OE2 | ++ GRC_LCLCTRL_GPIO_OUTPUT0 | ++ GRC_LCLCTRL_GPIO_OUTPUT1 | ++ tp->grc_local_ctrl; ++ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, ++ TG3_GRC_LCLCTL_PWRSW_DELAY); ++ ++ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; ++ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, ++ TG3_GRC_LCLCTL_PWRSW_DELAY); ++ ++ grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; ++ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, ++ TG3_GRC_LCLCTL_PWRSW_DELAY); ++ } else { ++ u32 no_gpio2; ++ u32 grc_local_ctrl = 0; ++ ++ /* Workaround to prevent overdrawing Amps. */ ++ if (tg3_asic_rev(tp) == ASIC_REV_5714) { ++ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; ++ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | ++ grc_local_ctrl, ++ TG3_GRC_LCLCTL_PWRSW_DELAY); ++ } ++ ++ /* On 5753 and variants, GPIO2 cannot be used. */ ++ no_gpio2 = tp->nic_sram_data_cfg & ++ NIC_SRAM_DATA_CFG_NO_GPIO2; ++ ++ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | ++ GRC_LCLCTRL_GPIO_OE1 | ++ GRC_LCLCTRL_GPIO_OE2 | ++ GRC_LCLCTRL_GPIO_OUTPUT1 | ++ GRC_LCLCTRL_GPIO_OUTPUT2; ++ if (no_gpio2) { ++ grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | ++ GRC_LCLCTRL_GPIO_OUTPUT2); ++ } ++ tw32_wait_f(GRC_LOCAL_CTRL, ++ tp->grc_local_ctrl | grc_local_ctrl, ++ TG3_GRC_LCLCTL_PWRSW_DELAY); ++ ++ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; ++ ++ tw32_wait_f(GRC_LOCAL_CTRL, ++ tp->grc_local_ctrl | grc_local_ctrl, ++ TG3_GRC_LCLCTL_PWRSW_DELAY); ++ ++ if (!no_gpio2) { ++ grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; ++ tw32_wait_f(GRC_LOCAL_CTRL, ++ tp->grc_local_ctrl | grc_local_ctrl, ++ TG3_GRC_LCLCTL_PWRSW_DELAY); ++ } ++ } ++} ++ ++static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) ++{ ++ u32 msg = 0; ++ ++ /* Serialize power state transitions */ ++ if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) ++ return; ++ ++ if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) ++ msg = TG3_GPIO_MSG_NEED_VAUX; ++ ++ msg = tg3_set_function_status(tp, msg); ++ ++ if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) ++ goto done; ++ ++ if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) ++ tg3_pwrsrc_switch_to_vaux(tp); ++ else ++ tg3_pwrsrc_die_with_vmain(tp); ++ ++done: ++ tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); ++} ++ ++static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) ++{ ++ bool need_vaux = false; ++ ++ /* The GPIOs do something completely different on 57765. */ ++ if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) ++ return; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 || ++ tg3_asic_rev(tp) == ASIC_REV_5719 || ++ tg3_asic_rev(tp) == ASIC_REV_5720) { ++ tg3_frob_aux_power_5717(tp, include_wol ? ++ tg3_flag(tp, WOL_ENABLE) != 0 : 0); ++ return; ++ } ++ ++ if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { ++ struct net_device *dev_peer; ++ ++ dev_peer = pci_get_drvdata(tp->pdev_peer); ++ ++ /* remove_one() may have been run on the peer. 
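++		 * Dual-port devices share one power source: if the peer
++		 * function is still fully initialized, leave the power
++		 * source alone; if the peer needs WOL or ASF, Vaux must
++		 * stay up (see below).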
*/ ++ if (dev_peer) { ++ struct tg3 *tp_peer = netdev_priv(dev_peer); ++ ++ if (tg3_flag(tp_peer, INIT_COMPLETE)) ++ return; ++ ++ if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || ++ tg3_flag(tp_peer, ENABLE_ASF)) ++ need_vaux = true; ++ } ++ } ++ ++ if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || ++ tg3_flag(tp, ENABLE_ASF)) ++ need_vaux = true; ++ ++ if (need_vaux) ++ tg3_pwrsrc_switch_to_vaux(tp); ++ else ++ tg3_pwrsrc_die_with_vmain(tp); ++} ++ ++static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) ++{ ++ if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) ++ return 1; ++ else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { ++ if (speed != SPEED_10) ++ return 1; ++ } else if (speed == SPEED_10) ++ return 1; ++ ++ return 0; ++} ++ ++static bool tg3_phy_power_bug(struct tg3 *tp) ++{ ++ switch (tg3_asic_rev(tp)) { ++ case ASIC_REV_5700: ++ case ASIC_REV_5704: ++ return true; ++ case ASIC_REV_5780: ++ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) ++ return true; ++ return false; ++ case ASIC_REV_5717: ++ if (!tp->pci_fn) ++ return true; ++ return false; ++ case ASIC_REV_5719: ++ case ASIC_REV_5720: ++ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && ++ !tp->pci_fn) ++ return true; ++ return false; ++ } ++ ++ return false; ++} ++ ++static bool tg3_phy_led_bug(struct tg3 *tp) ++{ ++ switch (tg3_asic_rev(tp)) { ++ case ASIC_REV_5719: ++ case ASIC_REV_5720: ++ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && ++ !tp->pci_fn) ++ return true; ++ return false; ++ } ++ ++ return false; ++} ++ ++static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) ++{ ++ u32 val; ++ ++ if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) ++ return; ++ ++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { ++ if (tg3_asic_rev(tp) == ASIC_REV_5704) { ++ u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); ++ u32 serdes_cfg = tr32(MAC_SERDES_CFG); ++ ++ sg_dig_ctrl |= ++ SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; ++ tw32(SG_DIG_CTRL, sg_dig_ctrl); ++ tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); ++ } ++ return; ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5906) { ++ tg3_bmcr_reset(tp); ++ val = tr32(GRC_MISC_CFG); ++ tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); ++ udelay(40); ++ return; ++ } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { ++ u32 phytest; ++ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { ++ u32 phy; ++ ++ tg3_writephy(tp, MII_ADVERTISE, 0); ++ tg3_writephy(tp, MII_BMCR, ++ BMCR_ANENABLE | BMCR_ANRESTART); ++ ++ tg3_writephy(tp, MII_TG3_FET_TEST, ++ phytest | MII_TG3_FET_SHADOW_EN); ++ if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { ++ phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; ++ tg3_writephy(tp, ++ MII_TG3_FET_SHDW_AUXMODE4, ++ phy); ++ } ++ tg3_writephy(tp, MII_TG3_FET_TEST, phytest); ++ } ++ return; ++ } else if (do_low_power) { ++ if (!tg3_phy_led_bug(tp)) ++ tg3_writephy(tp, MII_TG3_EXT_CTRL, ++ MII_TG3_EXT_CTRL_FORCE_LED_OFF); ++ ++ val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | ++ MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | ++ MII_TG3_AUXCTL_PCTL_VREG_11V; ++ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); ++ } ++ ++ /* The PHY should not be powered down on some chips because ++ * of bugs. ++ */ ++ if (tg3_phy_power_bug(tp)) ++ return; ++ ++ if (tg3_chip_rev(tp) == CHIPREV_5784_AX || ++ tg3_chip_rev(tp) == CHIPREV_5761_AX) { ++ val = tr32(TG3_CPMU_LSPD_1000MB_CLK); ++ val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; ++ val |= CPMU_LSPD_1000MB_MACCLK_12_5; ++ tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); ++ } ++ ++ tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); ++} ++ ++/* tp->lock is held. 
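++ * NVRAM arbitration sketch, derived from the code below:
++ * SWARB_REQ_SET1 requests the hardware semaphore and SWARB_GNT1 is
++ * polled up to 8000 times with a 20 usec delay (about 160 ms) before
++ * giving up. The lock nests via nvram_lock_cnt.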
*/ ++static int tg3_nvram_lock(struct tg3 *tp) ++{ ++ if (tg3_flag(tp, NVRAM)) { ++ int i; ++ ++ if (tp->nvram_lock_cnt == 0) { ++ tw32(NVRAM_SWARB, SWARB_REQ_SET1); ++ for (i = 0; i < 8000; i++) { ++ if (tr32(NVRAM_SWARB) & SWARB_GNT1) ++ break; ++ udelay(20); ++ } ++ if (i == 8000) { ++ tw32(NVRAM_SWARB, SWARB_REQ_CLR1); ++ return -ENODEV; ++ } ++ } ++ tp->nvram_lock_cnt++; ++ } ++ return 0; ++} ++ ++/* tp->lock is held. */ ++static void tg3_nvram_unlock(struct tg3 *tp) ++{ ++ if (tg3_flag(tp, NVRAM)) { ++ if (tp->nvram_lock_cnt > 0) ++ tp->nvram_lock_cnt--; ++ if (tp->nvram_lock_cnt == 0) ++ tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); ++ } ++} ++ ++/* tp->lock is held. */ ++static void tg3_enable_nvram_access(struct tg3 *tp) ++{ ++ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { ++ u32 nvaccess = tr32(NVRAM_ACCESS); ++ ++ tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); ++ } ++} ++ ++/* tp->lock is held. */ ++static void tg3_disable_nvram_access(struct tg3 *tp) ++{ ++ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { ++ u32 nvaccess = tr32(NVRAM_ACCESS); ++ ++ tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); ++ } ++} ++ ++static int tg3_nvram_read_using_eeprom(struct tg3 *tp, ++ u32 offset, u32 *val) ++{ ++ u32 tmp; ++ int i; ++ ++ if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) ++ return -EINVAL; ++ ++ tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | ++ EEPROM_ADDR_DEVID_MASK | ++ EEPROM_ADDR_READ); ++ tw32(GRC_EEPROM_ADDR, ++ tmp | ++ (0 << EEPROM_ADDR_DEVID_SHIFT) | ++ ((offset << EEPROM_ADDR_ADDR_SHIFT) & ++ EEPROM_ADDR_ADDR_MASK) | ++ EEPROM_ADDR_READ | EEPROM_ADDR_START); ++ ++ for (i = 0; i < 1000; i++) { ++ tmp = tr32(GRC_EEPROM_ADDR); ++ ++ if (tmp & EEPROM_ADDR_COMPLETE) ++ break; ++ msleep(1); ++ } ++ if (!(tmp & EEPROM_ADDR_COMPLETE)) ++ return -EBUSY; ++ ++ tmp = tr32(GRC_EEPROM_DATA); ++ ++ /* ++ * The data will always be opposite the native endian ++ * format. Perform a blind byteswap to compensate. ++ */ ++ *val = swab32(tmp); ++ ++ return 0; ++} ++ ++#define NVRAM_CMD_TIMEOUT 10000 ++ ++static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) ++{ ++ int i; ++ ++ tw32(NVRAM_CMD, nvram_cmd); ++ for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { ++#if defined(__VMKLNX__) || (LINUX_VERSION_CODE < 0x020627) /* 2.6.39 */ ++ udelay(10); ++#else ++ usleep_range(10, 40); ++#endif ++ if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { ++ udelay(10); ++ break; ++ } ++ } ++ ++ if (i == NVRAM_CMD_TIMEOUT) ++ return -EBUSY; ++ ++ return 0; ++} ++ ++static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) ++{ ++ if (tg3_flag(tp, NVRAM) && ++ tg3_flag(tp, NVRAM_BUFFERED) && ++ tg3_flag(tp, FLASH) && ++ !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && ++ (tp->nvram_jedecnum == JEDEC_ATMEL)) ++ ++ addr = ((addr / tp->nvram_pagesize) << ++ ATMEL_AT45DB0X1B_PAGE_POS) + ++ (addr % tp->nvram_pagesize); ++ ++ return addr; ++} ++ ++static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) ++{ ++ if (tg3_flag(tp, NVRAM) && ++ tg3_flag(tp, NVRAM_BUFFERED) && ++ tg3_flag(tp, FLASH) && ++ !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && ++ (tp->nvram_jedecnum == JEDEC_ATMEL)) ++ ++ addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * ++ tp->nvram_pagesize) + ++ (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); ++ ++ return addr; ++} ++ ++/* NOTE: Data read in from NVRAM is byteswapped according to ++ * the byteswapping settings for all other register accesses. ++ * tg3 devices are BE devices, so on a BE machine, the data ++ * returned will be exactly as it is seen in NVRAM. 
On a LE ++ * machine, the 32-bit value will be byteswapped. ++ */ ++static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) ++{ ++ int ret; ++ ++ if (!tg3_flag(tp, NVRAM)) ++ return tg3_nvram_read_using_eeprom(tp, offset, val); ++ ++ offset = tg3_nvram_phys_addr(tp, offset); ++ ++ if (offset > NVRAM_ADDR_MSK) ++ return -EINVAL; ++ ++ ret = tg3_nvram_lock(tp); ++ if (ret) ++ return ret; ++ ++ tg3_enable_nvram_access(tp); ++ ++ tw32(NVRAM_ADDR, offset); ++ ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | ++ NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); ++ ++ if (ret == 0) ++ *val = tr32(NVRAM_RDDATA); ++ ++ tg3_disable_nvram_access(tp); ++ ++ tg3_nvram_unlock(tp); ++ ++ return ret; ++} ++ ++/* Ensures NVRAM data is in bytestream format. */ ++static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) ++{ ++ u32 v; ++ int res = tg3_nvram_read(tp, offset, &v); ++ if (!res) ++ *val = cpu_to_be32(v); ++ return res; ++} ++ ++static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, ++ u32 offset, u32 len, u8 *buf) ++{ ++ int i, j, rc = 0; ++ u32 val; ++ ++ for (i = 0; i < len; i += 4) { ++ u32 addr; ++ __be32 data; ++ ++ addr = offset + i; ++ ++ memcpy(&data, buf + i, 4); ++ ++ /* ++ * The SEEPROM interface expects the data to always be opposite ++ * the native endian format. We accomplish this by reversing ++ * all the operations that would have been performed on the ++ * data from a call to tg3_nvram_read_be32(). ++ */ ++ tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); ++ ++ val = tr32(GRC_EEPROM_ADDR); ++ tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); ++ ++ val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | ++ EEPROM_ADDR_READ); ++ tw32(GRC_EEPROM_ADDR, val | ++ (0 << EEPROM_ADDR_DEVID_SHIFT) | ++ (addr & EEPROM_ADDR_ADDR_MASK) | ++ EEPROM_ADDR_START | ++ EEPROM_ADDR_WRITE); ++ ++ for (j = 0; j < 1000; j++) { ++ val = tr32(GRC_EEPROM_ADDR); ++ ++ if (val & EEPROM_ADDR_COMPLETE) ++ break; ++ msleep(1); ++ } ++ if (!(val & EEPROM_ADDR_COMPLETE)) { ++ rc = -EBUSY; ++ break; ++ } ++ } ++ ++ return rc; ++} ++ ++/* offset and length are dword aligned */ ++static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, ++ u8 *buf) ++{ ++ int ret = 0; ++ u32 pagesize = tp->nvram_pagesize; ++ u32 pagemask = pagesize - 1; ++ u32 nvram_cmd; ++ u8 *tmp; ++ ++ tmp = kmalloc(pagesize, GFP_KERNEL); ++ if (tmp == NULL) ++ return -ENOMEM; ++ ++ while (len) { ++ int j; ++ u32 phy_addr, page_off, size; ++ ++ phy_addr = offset & ~pagemask; ++ ++ for (j = 0; j < pagesize; j += 4) { ++ ret = tg3_nvram_read_be32(tp, phy_addr + j, ++ (__be32 *) (tmp + j)); ++ if (ret) ++ break; ++ } ++ if (ret) ++ break; ++ ++ page_off = offset & pagemask; ++ size = pagesize; ++ if (len < size) ++ size = len; ++ ++ len -= size; ++ ++ memcpy(tmp + page_off, buf, size); ++ ++ offset = offset + (pagesize - page_off); ++ ++ tg3_enable_nvram_access(tp); ++ ++ /* ++ * Before we can erase the flash page, we need ++ * to issue a special "write enable" command. ++ */ ++ nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; ++ ++ if (tg3_nvram_exec_cmd(tp, nvram_cmd)) ++ break; ++ ++ /* Erase the target page */ ++ tw32(NVRAM_ADDR, phy_addr); ++ ++ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | ++ NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; ++ ++ if (tg3_nvram_exec_cmd(tp, nvram_cmd)) ++ break; ++ ++ /* Issue another write enable to start the write. 
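++		 * Full unbuffered-flash cycle per page, as implemented
++		 * in this loop: read back the whole page, merge in the
++		 * new bytes, write-enable, erase the page, write-enable
++		 * again, then program one dword at a time with
++		 * FIRST/LAST framing.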
*/ ++ nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; ++ ++ if (tg3_nvram_exec_cmd(tp, nvram_cmd)) ++ break; ++ ++ for (j = 0; j < pagesize; j += 4) { ++ __be32 data; ++ ++ data = *((__be32 *) (tmp + j)); ++ ++ tw32(NVRAM_WRDATA, be32_to_cpu(data)); ++ ++ tw32(NVRAM_ADDR, phy_addr + j); ++ ++ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | ++ NVRAM_CMD_WR; ++ ++ if (j == 0) ++ nvram_cmd |= NVRAM_CMD_FIRST; ++ else if (j == (pagesize - 4)) ++ nvram_cmd |= NVRAM_CMD_LAST; ++ ++ ret = tg3_nvram_exec_cmd(tp, nvram_cmd); ++ if (ret) ++ break; ++ } ++ if (ret) ++ break; ++ } ++ ++ nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; ++ tg3_nvram_exec_cmd(tp, nvram_cmd); ++ ++ kfree(tmp); ++ ++ return ret; ++} ++ ++/* offset and length are dword aligned */ ++static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, ++ u8 *buf) ++{ ++ int i, ret = 0; ++ ++ for (i = 0; i < len; i += 4, offset += 4) { ++ u32 page_off, phy_addr, nvram_cmd; ++ __be32 data; ++ ++ memcpy(&data, buf + i, 4); ++ tw32(NVRAM_WRDATA, be32_to_cpu(data)); ++ ++ page_off = offset % tp->nvram_pagesize; ++ ++ phy_addr = tg3_nvram_phys_addr(tp, offset); ++ ++ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; ++ ++ if (page_off == 0 || i == 0) ++ nvram_cmd |= NVRAM_CMD_FIRST; ++ if (page_off == (tp->nvram_pagesize - 4)) ++ nvram_cmd |= NVRAM_CMD_LAST; ++ ++ if (i == (len - 4)) ++ nvram_cmd |= NVRAM_CMD_LAST; ++ ++ if ((nvram_cmd & NVRAM_CMD_FIRST) || ++ !tg3_flag(tp, FLASH) || ++ !tg3_flag(tp, 57765_PLUS)) ++ tw32(NVRAM_ADDR, phy_addr); ++ ++ if (tg3_asic_rev(tp) != ASIC_REV_5752 && ++ !tg3_flag(tp, 5755_PLUS) && ++ (tp->nvram_jedecnum == JEDEC_ST) && ++ (nvram_cmd & NVRAM_CMD_FIRST)) { ++ u32 cmd; ++ ++ cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; ++ ret = tg3_nvram_exec_cmd(tp, cmd); ++ if (ret) ++ break; ++ } ++ if (!tg3_flag(tp, FLASH)) { ++ /* We always do complete word writes to eeprom. */ ++ nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); ++ } ++ ++ ret = tg3_nvram_exec_cmd(tp, nvram_cmd); ++ if (ret) ++ break; ++ } ++ return ret; ++} ++ ++/* offset and length are dword aligned */ ++static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) ++{ ++ int ret; ++ ++ if (tg3_flag(tp, EEPROM_WRITE_PROT)) { ++ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & ++ ~GRC_LCLCTRL_GPIO_OUTPUT1); ++ udelay(40); ++ } ++ ++ if (!tg3_flag(tp, NVRAM)) { ++ ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); ++ } else { ++ u32 grc_mode; ++ ++ ret = tg3_nvram_lock(tp); ++ if (ret) ++ return ret; ++ ++ tg3_enable_nvram_access(tp); ++ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) ++ tw32(NVRAM_WRITE1, 0x406); ++ ++ grc_mode = tr32(GRC_MODE); ++ tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); ++ ++ if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { ++ ret = tg3_nvram_write_block_buffered(tp, offset, len, ++ buf); ++ } else { ++ ret = tg3_nvram_write_block_unbuffered(tp, offset, len, ++ buf); ++ } ++ ++ grc_mode = tr32(GRC_MODE); ++ tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); ++ ++ tg3_disable_nvram_access(tp); ++ tg3_nvram_unlock(tp); ++ } ++ ++ if (tg3_flag(tp, EEPROM_WRITE_PROT)) { ++ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); ++ udelay(40); ++ } ++ ++ return ret; ++} ++ ++#define RX_CPU_SCRATCH_BASE 0x30000 ++#define RX_CPU_SCRATCH_SIZE 0x04000 ++#define TX_CPU_SCRATCH_BASE 0x34000 ++#define TX_CPU_SCRATCH_SIZE 0x04000 ++ ++/* tp->lock is held. 
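++ * tg3_pause_cpu() below keeps re-issuing CPU_MODE_HALT (up to 10000
++ * attempts) until the halt bit reads back set; the retry loop exists
++ * because a single write may not take effect immediately.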
*/ ++static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) ++{ ++ int i; ++ const int iters = 10000; ++ ++ for (i = 0; i < iters; i++) { ++ tw32(cpu_base + CPU_STATE, 0xffffffff); ++ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); ++ if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) ++ break; ++ if (pci_channel_offline(tp->pdev)) ++ return -EBUSY; ++ } ++ ++ return (i == iters) ? -EBUSY : 0; ++} ++ ++/* tp->lock is held. */ ++static int tg3_rxcpu_pause(struct tg3 *tp) ++{ ++ int rc = tg3_pause_cpu(tp, RX_CPU_BASE); ++ ++ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); ++ tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); ++ udelay(10); ++ ++ return rc; ++} ++ ++/* tp->lock is held. */ ++static int tg3_txcpu_pause(struct tg3 *tp) ++{ ++ return tg3_pause_cpu(tp, TX_CPU_BASE); ++} ++ ++/* tp->lock is held. */ ++static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base) ++{ ++ tw32(cpu_base + CPU_STATE, 0xffffffff); ++ tw32_f(cpu_base + CPU_MODE, 0x00000000); ++} ++ ++/* tp->lock is held. */ ++static void tg3_rxcpu_resume(struct tg3 *tp) ++{ ++ tg3_resume_cpu(tp, RX_CPU_BASE); ++} ++ ++/* tp->lock is held. */ ++static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base) ++{ ++ int rc; ++ ++ BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5906) { ++ u32 val = tr32(GRC_VCPU_EXT_CTRL); ++ ++ tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); ++ return 0; ++ } ++ if (cpu_base == RX_CPU_BASE) { ++ rc = tg3_rxcpu_pause(tp); ++ } else { ++ /* ++ * There is only an Rx CPU for the 5750 derivative in the ++ * BCM4785. ++ */ ++ if (tg3_flag(tp, IS_SSB_CORE)) ++ return 0; ++ ++ rc = tg3_txcpu_pause(tp); ++ } ++ ++ if (rc) { ++ netdev_err(tp->dev, "%s timed out, %s CPU\n", ++ __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX"); ++ return -ENODEV; ++ } ++ ++ /* Clear firmware's nvram arbitration. */ ++ if (tg3_flag(tp, NVRAM)) ++ tw32(NVRAM_SWARB, SWARB_REQ_CLR0); ++ return 0; ++} ++ ++static int tg3_fw_data_len(const struct tg3_firmware_hdr *fw_hdr) ++{ ++ return (fw_hdr->len - TG3_FW_HDR_LEN) / sizeof(u32); ++} ++ ++/* tp->lock is held. */ ++static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, ++ u32 cpu_scratch_base, int cpu_scratch_size, ++ const struct tg3_firmware_hdr *fw_hdr) ++{ ++ int err, i; ++ void (*write_op)(struct tg3 *, u32, u32); ++ int total_len = tp->fw->size; ++ ++ if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { ++ netdev_err(tp->dev, ++ "%s: Trying to load TX cpu firmware which is 5705\n", ++ __func__); ++ return -EINVAL; ++ } ++ ++ if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766) ++ write_op = tg3_write_mem; ++ else ++ write_op = tg3_write_indirect_reg32; ++ ++ if (tg3_asic_rev(tp) != ASIC_REV_57766) { ++ /* It is possible that bootcode is still loading at this point. ++ * Get the nvram lock first before halting the cpu. 
++ */ ++ int lock_err = tg3_nvram_lock(tp); ++ err = tg3_halt_cpu(tp, cpu_base); ++ if (!lock_err) ++ tg3_nvram_unlock(tp); ++ if (err) ++ goto out; ++ ++ for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) ++ write_op(tp, cpu_scratch_base + i, 0); ++ tw32(cpu_base + CPU_STATE, 0xffffffff); ++ tw32(cpu_base + CPU_MODE, ++ tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT); ++ } else { ++ /* Subtract additional main header for fragmented firmware and ++ * advance to the first fragment ++ */ ++ total_len -= TG3_FW_HDR_LEN; ++ fw_hdr++; ++ } ++ ++ do { ++ u32 *fw_data = (u32 *)(fw_hdr + 1); ++ for (i = 0; i < tg3_fw_data_len(fw_hdr); i++) ++ write_op(tp, cpu_scratch_base + ++ (fw_hdr->base_addr & 0xffff) + ++ (i * sizeof(u32)), ++ fw_data[i]); ++ ++ total_len -= fw_hdr->len; ++ ++ /* Advance to next fragment */ ++ fw_hdr = (struct tg3_firmware_hdr *) ++ ((void *)fw_hdr + fw_hdr->len); ++ } while (total_len > 0); ++ ++ err = 0; ++ ++out: ++ return err; ++} ++ ++/* tp->lock is held. */ ++static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc) ++{ ++ int i; ++ const int iters = 5; ++ ++ tw32(cpu_base + CPU_STATE, 0xffffffff); ++ tw32_f(cpu_base + CPU_PC, pc); ++ ++ for (i = 0; i < iters; i++) { ++ if (tr32(cpu_base + CPU_PC) == pc) ++ break; ++ tw32(cpu_base + CPU_STATE, 0xffffffff); ++ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); ++ tw32_f(cpu_base + CPU_PC, pc); ++ udelay(1000); ++ } ++ ++ return (i == iters) ? -EBUSY : 0; ++} ++ ++/* tp->lock is held. */ ++static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) ++{ ++ const struct tg3_firmware_hdr *fw_hdr; ++ int err; ++ ++ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; ++ ++ /* Firmware blob starts with version numbers, followed by ++ start address and length. We are setting complete length. ++ length = end_address_of_bss - start_address_of_text. ++ Remainder is the blob to be loaded contiguously ++ from start address. */ ++ ++ err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, ++ RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, ++ fw_hdr); ++ if (err) ++ return err; ++ ++ err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, ++ TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, ++ fw_hdr); ++ if (err) ++ return err; ++ ++ /* Now startup only the RX cpu. */ ++ err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, ++ fw_hdr->base_addr); ++ if (err) { ++ netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " ++ "should be %08x\n", __func__, ++ tr32(RX_CPU_BASE + CPU_PC), ++ fw_hdr->base_addr); ++ return -ENODEV; ++ } ++ ++ tg3_rxcpu_resume(tp); ++ ++ return 0; ++} ++ ++static int tg3_validate_rxcpu_state(struct tg3 *tp) ++{ ++ const int iters = 1000; ++ int i; ++ u32 val; ++ ++ /* Wait for boot code to complete initialization and enter service ++ * loop. It is then safe to download service patches ++ */ ++ for (i = 0; i < iters; i++) { ++ if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP) ++ break; ++ ++ udelay(10); ++ } ++ ++ if (i == iters) { ++ netdev_err(tp->dev, "Boot code not ready for service patches\n"); ++ return -EBUSY; ++ } ++ ++ val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE); ++ if (val & 0xff) { ++ netdev_warn(tp->dev, ++ "Other patches exist. Not downloading EEE patch\n"); ++ return -EEXIST; ++ } ++ ++ return 0; ++} ++ ++/* tp->lock is held. 
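++ *
++ * Shape of the fragmented 57766 firmware image handled below,
++ * reconstructed from the function's own comment (illustrative):
++ *
++ *   main header   (version, base addr, len = 0xffffffff, unused)
++ *   fragment 1    header (base addr, len) + data
++ *   fragment 2    header (base addr, len) + data
++ *   ...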
*/ ++static void tg3_load_57766_firmware(struct tg3 *tp) ++{ ++ struct tg3_firmware_hdr *fw_hdr; ++ ++ if (!tg3_flag(tp, NO_NVRAM)) ++ return; ++ ++ if (tg3_validate_rxcpu_state(tp)) ++ return; ++ ++ if (!tp->fw) ++ return; ++ ++ /* This firmware blob has a different format than older firmware ++ * releases as given below. The main difference is we have fragmented ++ * data to be written to non-contiguous locations. ++ * ++ * In the beginning we have a firmware header identical to other ++ * firmware which consists of version, base addr and length. The length ++ * here is unused and set to 0xffffffff. ++ * ++ * This is followed by a series of firmware fragments which are ++ * individually identical to previous firmware. i.e. they have the ++ * firmware header and followed by data for that fragment. The version ++ * field of the individual fragment header is unused. ++ */ ++ ++ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; ++ if (fw_hdr->base_addr != TG3_57766_FW_BASE_ADDR) ++ return; ++ ++ if (tg3_rxcpu_pause(tp)) ++ return; ++ ++ /* tg3_load_firmware_cpu() will always succeed for the 57766 */ ++ tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr); ++ ++ tg3_rxcpu_resume(tp); ++} ++ ++#if TG3_TSO_SUPPORT != 0 ++ ++/* tp->lock is held. */ ++static int tg3_load_tso_firmware(struct tg3 *tp) ++{ ++ const struct tg3_firmware_hdr *fw_hdr; ++ unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; ++ int err; ++ ++ if (!tg3_flag(tp, FW_TSO)) ++ return 0; ++ ++ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; ++ ++ /* Firmware blob starts with version numbers, followed by ++ start address and length. We are setting complete length. ++ length = end_address_of_bss - start_address_of_text. ++ Remainder is the blob to be loaded contiguously ++ from start address. */ ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5705) { ++ cpu_base = RX_CPU_BASE; ++ cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; ++ cpu_scratch_size = (tp->fw->size - TG3_FW_HDR_LEN + ++ TG3_TSO5_FW_SBSS_LEN + ++ TG3_TSO5_FW_BSS_LEN); ++ } else { ++ cpu_base = TX_CPU_BASE; ++ cpu_scratch_base = TX_CPU_SCRATCH_BASE; ++ cpu_scratch_size = TX_CPU_SCRATCH_SIZE; ++ } ++ ++ err = tg3_load_firmware_cpu(tp, cpu_base, ++ cpu_scratch_base, cpu_scratch_size, ++ fw_hdr); ++ if (err) ++ return err; ++ ++ /* Now startup the cpu. */ ++ err = tg3_pause_cpu_and_set_pc(tp, cpu_base, ++ fw_hdr->base_addr); ++ if (err) { ++ netdev_err(tp->dev, ++ "%s fails to set CPU PC, is %08x should be %08x\n", ++ __func__, tr32(cpu_base + CPU_PC), ++ fw_hdr->base_addr); ++ return -ENODEV; ++ } ++ ++ tg3_resume_cpu(tp, cpu_base); ++ return 0; ++} ++ ++#endif /* TG3_TSO_SUPPORT != 0 */ ++ ++/* tp->lock is held. */ ++static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index) ++{ ++ u32 addr_high, addr_low; ++ ++ addr_high = ((mac_addr[0] << 8) | mac_addr[1]); ++ addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) | ++ (mac_addr[4] << 8) | mac_addr[5]); ++ ++ if (index < 4) { ++ tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high); ++ tw32(MAC_ADDR_0_LOW + (index * 8), addr_low); ++ } else { ++ index -= 4; ++ tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high); ++ tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low); ++ } ++} ++ ++/* tp->lock is held. 
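++ * __tg3_set_mac_addr() below programs the station address into the
++ * four primary MAC_ADDR slots (and the twelve extended slots on
++ * 5703/5704), then seeds MAC_TX_BACKOFF_SEED with the masked byte
++ * sum of the address.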
*/ ++static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1) ++{ ++ u32 addr_high; ++ int i; ++ ++ for (i = 0; i < 4; i++) { ++ if (i == 1 && skip_mac_1) ++ continue; ++ __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5703 || ++ tg3_asic_rev(tp) == ASIC_REV_5704) { ++ for (i = 4; i < 16; i++) ++ __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); ++ } ++ ++ addr_high = (tp->dev->dev_addr[0] + ++ tp->dev->dev_addr[1] + ++ tp->dev->dev_addr[2] + ++ tp->dev->dev_addr[3] + ++ tp->dev->dev_addr[4] + ++ tp->dev->dev_addr[5]) & ++ TX_BACKOFF_SEED_MASK; ++ tw32(MAC_TX_BACKOFF_SEED, addr_high); ++} ++ ++static void tg3_enable_register_access(struct tg3 *tp) ++{ ++ /* ++ * Make sure register accesses (indirect or otherwise) will function ++ * correctly. ++ */ ++ pci_write_config_dword(tp->pdev, ++ TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); ++} ++ ++static int tg3_power_up(struct tg3 *tp) ++{ ++ int err; ++ ++ tg3_enable_register_access(tp); ++ ++ /* Kernels less than around 2.6.37 still need this */ ++ pci_enable_wake(tp->pdev, PCI_D0, false); ++ ++ err = pci_set_power_state(tp->pdev, PCI_D0); ++ if (!err) { ++ /* Switch out of Vaux if it is a NIC */ ++ tg3_pwrsrc_switch_to_vmain(tp); ++ } else { ++ netdev_err(tp->dev, "Transition to D0 failed\n"); ++ } ++ ++ return err; ++} ++ ++static void tg3_power_down(struct tg3 *tp) ++{ ++ pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); ++ pci_set_power_state(tp->pdev, PCI_D3hot); ++} ++ ++static int tg3_setup_phy(struct tg3 *, bool); ++ ++static int tg3_power_down_prepare(struct tg3 *tp) ++{ ++ u32 misc_host_ctrl; ++ bool device_should_wake, do_low_power; ++ ++ tg3_enable_register_access(tp); ++ ++ /* Restore the CLKREQ setting. */ ++ if (tg3_flag(tp, CLKREQ_BUG)) ++ pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, ++ PCI_EXP_LNKCTL_CLKREQ_EN); ++ ++ misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); ++ tw32(TG3PCI_MISC_HOST_CTRL, ++ misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); ++ ++ device_should_wake = device_may_wakeup(&tp->pdev->dev) && ++ tg3_flag(tp, WOL_ENABLE); ++ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_flag(tp, USE_PHYLIB)) { ++ do_low_power = false; ++ if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && ++ !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { ++ struct phy_device *phydev; ++ u32 phyid, advertising; ++ ++ phydev = tp->mdio_bus->phy_map[tp->phy_addr]; ++ ++ tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; ++ ++ tp->link_config.speed = phydev->speed; ++ tp->link_config.duplex = phydev->duplex; ++ tp->link_config.autoneg = phydev->autoneg; ++ tp->link_config.advertising = phydev->advertising; ++ ++ advertising = ADVERTISED_TP | ++ ADVERTISED_Pause | ++ ADVERTISED_Autoneg | ++ ADVERTISED_10baseT_Half; ++ ++ if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { ++ if (tg3_flag(tp, WOL_SPEED_100MB)) ++ advertising |= ++ ADVERTISED_100baseT_Half | ++ ADVERTISED_100baseT_Full | ++ ADVERTISED_10baseT_Full; ++ else ++ advertising |= ADVERTISED_10baseT_Full; ++ } ++ ++ phydev->advertising = advertising; ++ ++ phy_start_aneg(phydev); ++ ++ phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; ++ if (phyid != PHY_ID_BCMAC131) { ++ phyid &= PHY_BCM_OUI_MASK; ++ if (phyid == PHY_BCM_OUI_1 || ++ phyid == PHY_BCM_OUI_2 || ++ phyid == PHY_BCM_OUI_3) ++ do_low_power = true; ++ } ++ } ++ } else ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++ { ++ do_low_power = true; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) ++ tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; ++ ++ if (!(tp->phy_flags & 
TG3_PHYFLG_ANY_SERDES)) ++ tg3_setup_phy(tp, false); ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5906) { ++ u32 val; ++ ++ val = tr32(GRC_VCPU_EXT_CTRL); ++ tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); ++ } else if (!tg3_flag(tp, ENABLE_ASF)) { ++ int i; ++ u32 val; ++ ++ for (i = 0; i < 200; i++) { ++ tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); ++ if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) ++ break; ++ msleep(1); ++ } ++ } ++ if (tg3_flag(tp, WOL_CAP)) ++ tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | ++ WOL_DRV_STATE_SHUTDOWN | ++ WOL_DRV_WOL | ++ WOL_SET_MAGIC_PKT); ++ ++ if (device_should_wake) { ++ u32 mac_mode; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { ++ if (do_low_power && ++ !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { ++ tg3_phy_auxctl_write(tp, ++ MII_TG3_AUXCTL_SHDWSEL_PWRCTL, ++ MII_TG3_AUXCTL_PCTL_WOL_EN | ++ MII_TG3_AUXCTL_PCTL_100TX_LPWR | ++ MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); ++ udelay(40); ++ } ++ ++ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) ++ mac_mode = MAC_MODE_PORT_MODE_GMII; ++ else if (tp->phy_flags & ++ TG3_PHYFLG_KEEP_LINK_ON_PWRDN) { ++ if (tp->link_config.active_speed == SPEED_1000) ++ mac_mode = MAC_MODE_PORT_MODE_GMII; ++ else ++ mac_mode = MAC_MODE_PORT_MODE_MII; ++ } else ++ mac_mode = MAC_MODE_PORT_MODE_MII; ++ ++ mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; ++ if (tg3_asic_rev(tp) == ASIC_REV_5700) { ++ u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? ++ SPEED_100 : SPEED_10; ++ if (tg3_5700_link_polarity(tp, speed)) ++ mac_mode |= MAC_MODE_LINK_POLARITY; ++ else ++ mac_mode &= ~MAC_MODE_LINK_POLARITY; ++ } ++ } else { ++ mac_mode = MAC_MODE_PORT_MODE_TBI; ++ } ++ ++ if (!tg3_flag(tp, 5750_PLUS)) ++ tw32(MAC_LED_CTRL, tp->led_ctrl); ++ ++ mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; ++ if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && ++ (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) ++ mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; ++ ++ if (tg3_flag(tp, ENABLE_APE)) ++ mac_mode |= MAC_MODE_APE_TX_EN | ++ MAC_MODE_APE_RX_EN | ++ MAC_MODE_TDE_ENABLE; ++ ++ tw32_f(MAC_MODE, mac_mode); ++ udelay(100); ++ ++ tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); ++ udelay(10); ++ } ++ ++ if (!tg3_flag(tp, WOL_SPEED_100MB) && ++ (tg3_asic_rev(tp) == ASIC_REV_5700 || ++ tg3_asic_rev(tp) == ASIC_REV_5701)) { ++ u32 base_val; ++ ++ base_val = tp->pci_clock_ctrl; ++ base_val |= (CLOCK_CTRL_RXCLK_DISABLE | ++ CLOCK_CTRL_TXCLK_DISABLE); ++ ++ tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | ++ CLOCK_CTRL_PWRDOWN_PLL133, 40); ++ } else if (tg3_flag(tp, 5780_CLASS) || ++ tg3_flag(tp, CPMU_PRESENT) || ++ tg3_asic_rev(tp) == ASIC_REV_5906) { ++ /* do nothing */ ++ } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { ++ u32 newbits1, newbits2; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5700 || ++ tg3_asic_rev(tp) == ASIC_REV_5701) { ++ newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | ++ CLOCK_CTRL_TXCLK_DISABLE | ++ CLOCK_CTRL_ALTCLK); ++ newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; ++ } else if (tg3_flag(tp, 5705_PLUS)) { ++ newbits1 = CLOCK_CTRL_625_CORE; ++ newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; ++ } else { ++ newbits1 = CLOCK_CTRL_ALTCLK; ++ newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; ++ } ++ ++ tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, ++ 40); ++ ++ tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, ++ 40); ++ ++ if (!tg3_flag(tp, 5705_PLUS)) { ++ u32 newbits3; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5700 || ++ tg3_asic_rev(tp) == ASIC_REV_5701) { ++ newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | 
++ CLOCK_CTRL_TXCLK_DISABLE | ++ CLOCK_CTRL_44MHZ_CORE); ++ } else { ++ newbits3 = CLOCK_CTRL_44MHZ_CORE; ++ } ++ ++ tw32_wait_f(TG3PCI_CLOCK_CTRL, ++ tp->pci_clock_ctrl | newbits3, 40); ++ } ++ } ++ ++ if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF)) ++ tg3_power_down_phy(tp, do_low_power); ++ ++ tg3_frob_aux_power(tp, true); ++ ++ /* Workaround for unstable PLL clock */ ++ if ((!tg3_flag(tp, IS_SSB_CORE)) && ++ ((tg3_chip_rev(tp) == CHIPREV_5750_AX) || ++ (tg3_chip_rev(tp) == CHIPREV_5750_BX))) { ++ u32 val = tr32(0x7d00); ++ ++ val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); ++ tw32(0x7d00, val); ++ if (!tg3_flag(tp, ENABLE_ASF)) { ++ int err; ++ ++ err = tg3_nvram_lock(tp); ++ tg3_halt_cpu(tp, RX_CPU_BASE); ++ if (!err) ++ tg3_nvram_unlock(tp); ++ } ++ } ++ ++ tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); ++ ++ tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); ++ ++ return 0; ++} ++ ++static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) ++{ ++ switch (val & MII_TG3_AUX_STAT_SPDMASK) { ++ case MII_TG3_AUX_STAT_10HALF: ++ *speed = SPEED_10; ++ *duplex = DUPLEX_HALF; ++ break; ++ ++ case MII_TG3_AUX_STAT_10FULL: ++ *speed = SPEED_10; ++ *duplex = DUPLEX_FULL; ++ break; ++ ++ case MII_TG3_AUX_STAT_100HALF: ++ *speed = SPEED_100; ++ *duplex = DUPLEX_HALF; ++ break; ++ ++ case MII_TG3_AUX_STAT_100FULL: ++ *speed = SPEED_100; ++ *duplex = DUPLEX_FULL; ++ break; ++ ++ case MII_TG3_AUX_STAT_1000HALF: ++ *speed = SPEED_1000; ++ *duplex = DUPLEX_HALF; ++ break; ++ ++ case MII_TG3_AUX_STAT_1000FULL: ++ *speed = SPEED_1000; ++ *duplex = DUPLEX_FULL; ++ break; ++ ++ default: ++ if (tp->phy_flags & TG3_PHYFLG_IS_FET) { ++ *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : ++ SPEED_10; ++ *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : ++ DUPLEX_HALF; ++ break; ++ } ++ *speed = SPEED_UNKNOWN; ++ *duplex = DUPLEX_UNKNOWN; ++ break; ++ } ++} ++ ++static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) ++{ ++ int err = 0; ++ u32 val, new_adv; ++ ++ new_adv = ADVERTISE_CSMA; ++ new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; ++ new_adv |= mii_advertise_flowctrl(flowctrl); ++ ++ err = tg3_writephy(tp, MII_ADVERTISE, new_adv); ++ if (err) ++ goto done; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { ++ new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); ++ ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) ++ new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; ++ ++ err = tg3_writephy(tp, MII_CTRL1000, new_adv); ++ if (err) ++ goto done; ++ } ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) ++ goto done; ++ ++#ifndef BCM_INCLUDE_PHYLIB_SUPPORT ++ if ((tp->phy_id & TG3_PHY_ID_MASK) != TG3_PHY_ID_BCM50612E) ++#endif ++ tw32(TG3_CPMU_EEE_MODE, ++ tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); ++ ++ err = tg3_phy_toggle_auxctl_smdsp(tp, true); ++ if (!err) { ++ u32 err2; ++ ++ val = 0; ++ /* Advertise 100-BaseTX EEE ability */ ++ if (advertise & ADVERTISED_100baseT_Full) ++ val |= MDIO_AN_EEE_ADV_100TX; ++ /* Advertise 1000-BaseT EEE ability */ ++ if (advertise & ADVERTISED_1000baseT_Full) ++ val |= MDIO_AN_EEE_ADV_1000T; ++ ++ if (!tp->eee.eee_enabled) { ++ val = 0; ++ tp->eee.advertised = 0; ++ } else { ++ tp->eee.advertised = advertise & ++ (ADVERTISED_100baseT_Full | ++ ADVERTISED_1000baseT_Full); ++ } ++ ++ err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); ++ if (err) ++ goto err_out; ++ ++ switch (tg3_asic_rev(tp)) { ++ case ASIC_REV_5717: ++ case ASIC_REV_57765: ++ case ASIC_REV_57766: ++ case ASIC_REV_5719: ++ /* If we advertised any eee advertisements above... 
*/ ++ if (val) ++ val = MII_TG3_DSP_TAP26_ALNOKO | ++ MII_TG3_DSP_TAP26_RMRXSTO | ++ MII_TG3_DSP_TAP26_OPCSINPT; ++ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); ++ /* Fall through */ ++ case ASIC_REV_5720: ++ case ASIC_REV_5762: ++ if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) ++ tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | ++ MII_TG3_DSP_CH34TP2_HIBW01); ++ } ++ ++#ifndef BCM_INCLUDE_PHYLIB_SUPPORT ++ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM50612E) { ++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, MII_TG3_DSP_TLER); ++ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &val); ++ if (tp->link_config.autoneg == AUTONEG_ENABLE) ++ val |= MII_TG3_DSP_TLER_AUTOGREEEN_EN; ++ else ++ val &= ~MII_TG3_DSP_TLER_AUTOGREEEN_EN; ++ tg3_phydsp_write(tp, MII_TG3_DSP_TLER, val); ++ } ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++ ++err_out: ++ err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); ++ if (!err) ++ err = err2; ++ } ++ ++done: ++ return err; ++} ++ ++static void tg3_phy_copper_begin(struct tg3 *tp) ++{ ++ if (tp->link_config.autoneg == AUTONEG_ENABLE || ++ (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { ++ u32 adv, fc; ++ ++ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && ++ !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { ++ adv = ADVERTISED_10baseT_Half | ++ ADVERTISED_10baseT_Full; ++ if (tg3_flag(tp, WOL_SPEED_100MB)) ++ adv |= ADVERTISED_100baseT_Half | ++ ADVERTISED_100baseT_Full; ++ if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) { ++ if (!(tp->phy_flags & ++ TG3_PHYFLG_DISABLE_1G_HD_ADV)) ++ adv |= ADVERTISED_1000baseT_Half; ++ adv |= ADVERTISED_1000baseT_Full; ++ } ++ ++ fc = FLOW_CTRL_TX | FLOW_CTRL_RX; ++ } else { ++ adv = tp->link_config.advertising; ++ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) ++ adv &= ~(ADVERTISED_1000baseT_Half | ++ ADVERTISED_1000baseT_Full); ++ ++ fc = tp->link_config.flowctrl; ++ } ++ ++ tg3_phy_autoneg_cfg(tp, adv, fc); ++ ++ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && ++ (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { ++ /* Normally during power down we want to autonegotiate ++ * the lowest possible speed for WOL. However, to avoid ++ * link flap, we leave it untouched. ++ */ ++ return; ++ } ++ ++ tg3_writephy(tp, MII_BMCR, ++ BMCR_ANENABLE | BMCR_ANRESTART); ++ } else { ++ int i; ++ u32 bmcr, orig_bmcr; ++ ++ tp->link_config.active_speed = tp->link_config.speed; ++ tp->link_config.active_duplex = tp->link_config.duplex; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5714) { ++ /* With autoneg disabled, 5715 only links up when the ++ * advertisement register has the configured speed ++ * enabled. 
++ */ ++ tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); ++ } ++ ++ bmcr = 0; ++ switch (tp->link_config.speed) { ++ default: ++ case SPEED_10: ++ break; ++ ++ case SPEED_100: ++ bmcr |= BMCR_SPEED100; ++ break; ++ ++ case SPEED_1000: ++ bmcr |= BMCR_SPEED1000; ++ break; ++ } ++ ++ if (tp->link_config.duplex == DUPLEX_FULL) ++ bmcr |= BMCR_FULLDPLX; ++ ++ if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && ++ (bmcr != orig_bmcr)) { ++ tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); ++ for (i = 0; i < 1500; i++) { ++ u32 tmp; ++ ++ udelay(10); ++ if (tg3_readphy(tp, MII_BMSR, &tmp) || ++ tg3_readphy(tp, MII_BMSR, &tmp)) ++ continue; ++ if (!(tmp & BMSR_LSTATUS)) { ++ udelay(40); ++ break; ++ } ++ } ++ tg3_writephy(tp, MII_BMCR, bmcr); ++ udelay(40); ++ } ++ } ++} ++ ++static int tg3_phy_pull_config(struct tg3 *tp) ++{ ++ int err; ++ u32 val; ++ ++ err = tg3_readphy(tp, MII_BMCR, &val); ++ if (err) ++ goto done; ++ ++ if (!(val & BMCR_ANENABLE)) { ++ tp->link_config.autoneg = AUTONEG_DISABLE; ++ tp->link_config.advertising = 0; ++ tg3_flag_clear(tp, PAUSE_AUTONEG); ++ ++ err = -EIO; ++ ++ switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) { ++ case 0: ++ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) ++ goto done; ++ ++ tp->link_config.speed = SPEED_10; ++ break; ++ case BMCR_SPEED100: ++ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) ++ goto done; ++ ++ tp->link_config.speed = SPEED_100; ++ break; ++ case BMCR_SPEED1000: ++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { ++ tp->link_config.speed = SPEED_1000; ++ break; ++ } ++ /* Fall through */ ++ default: ++ goto done; ++ } ++ ++ if (val & BMCR_FULLDPLX) ++ tp->link_config.duplex = DUPLEX_FULL; ++ else ++ tp->link_config.duplex = DUPLEX_HALF; ++ ++ tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; ++ ++ err = 0; ++ goto done; ++ } ++ ++ tp->link_config.autoneg = AUTONEG_ENABLE; ++ tp->link_config.advertising = ADVERTISED_Autoneg; ++ tg3_flag_set(tp, PAUSE_AUTONEG); ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { ++ u32 adv; ++ ++ err = tg3_readphy(tp, MII_ADVERTISE, &val); ++ if (err) ++ goto done; ++ ++ adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL); ++ tp->link_config.advertising |= adv | ADVERTISED_TP; ++ ++ tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val); ++ } else { ++ tp->link_config.advertising |= ADVERTISED_FIBRE; ++ } ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { ++ u32 adv; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { ++ err = tg3_readphy(tp, MII_CTRL1000, &val); ++ if (err) ++ goto done; ++ ++ adv = mii_ctrl1000_to_ethtool_adv_t(val); ++ } else { ++ err = tg3_readphy(tp, MII_ADVERTISE, &val); ++ if (err) ++ goto done; ++ ++ adv = tg3_decode_flowctrl_1000X(val); ++ tp->link_config.flowctrl = adv; ++ ++ val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL); ++ adv = mii_adv_to_ethtool_adv_x(val); ++ } ++ ++ tp->link_config.advertising |= adv; ++ } ++ ++done: ++ return err; ++} ++ ++static int tg3_init_5401phy_dsp(struct tg3 *tp) ++{ ++ int err; ++ ++ /* Turn off tap power management. 
*/ ++ /* Set Extended packet length bit */ ++ err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); ++ ++ err |= tg3_phydsp_write(tp, 0x0012, 0x1804); ++ err |= tg3_phydsp_write(tp, 0x0013, 0x1204); ++ err |= tg3_phydsp_write(tp, 0x8006, 0x0132); ++ err |= tg3_phydsp_write(tp, 0x8006, 0x0232); ++ err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); ++ ++ udelay(40); ++ ++ return err; ++} ++ ++static bool tg3_phy_eee_config_ok(struct tg3 *tp) ++{ ++ struct ethtool_eee eee; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) ++ return true; ++ ++ tg3_eee_pull_config(tp, &eee); ++ ++ if (tp->eee.eee_enabled) { ++ if (tp->eee.advertised != eee.advertised || ++ tp->eee.tx_lpi_timer != eee.tx_lpi_timer || ++ tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) ++ return false; ++ } else { ++ /* EEE is disabled but we're advertising */ ++ if (eee.advertised) ++ return false; ++ } ++ ++ return true; ++} ++ ++static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) ++{ ++ u32 advmsk, tgtadv, advertising; ++ ++ advertising = tp->link_config.advertising; ++ tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; ++ ++ advmsk = ADVERTISE_ALL; ++ if (tp->link_config.active_duplex == DUPLEX_FULL) { ++ tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); ++ advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; ++ } ++ ++ if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) ++ return false; ++ ++ if ((*lcladv & advmsk) != tgtadv) ++ return false; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { ++ u32 tg3_ctrl; ++ ++ tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); ++ ++ if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) ++ return false; ++ ++ if (tgtadv && ++ (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) { ++ tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; ++ tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL | ++ CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); ++ } else { ++ tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); ++ } ++ ++ if (tg3_ctrl != tgtadv) ++ return false; ++ } ++ ++ return true; ++} ++ ++static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) ++{ ++ u32 lpeth = 0; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { ++ u32 val; ++ ++ if (tg3_readphy(tp, MII_STAT1000, &val)) ++ return false; ++ ++ lpeth = mii_stat1000_to_ethtool_lpa_t(val); ++ } ++ ++ if (tg3_readphy(tp, MII_LPA, rmtadv)) ++ return false; ++ ++ lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); ++ tp->link_config.rmt_adv = lpeth; ++ ++ return true; ++} ++ ++static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up) ++{ ++ if (curr_link_up != tp->link_up) { ++ if (curr_link_up) { ++ netif_carrier_on(tp->dev); ++ } else { ++ netif_carrier_off(tp->dev); ++ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) ++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; ++ } ++ ++ tg3_link_report(tp); ++ return true; ++ } ++ ++ return false; ++} ++ ++static void tg3_clear_mac_status(struct tg3 *tp) ++{ ++ tw32(MAC_EVENT, 0); ++ ++ tw32_f(MAC_STATUS, ++ MAC_STATUS_SYNC_CHANGED | ++ MAC_STATUS_CFG_CHANGED | ++ MAC_STATUS_MI_COMPLETION | ++ MAC_STATUS_LNKSTATE_CHANGED); ++ udelay(40); ++} ++ ++static void tg3_setup_eee(struct tg3 *tp) ++{ ++ u32 val; ++ ++ val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | ++ TG3_CPMU_EEE_LNKIDL_UART_IDL; ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) ++ val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; ++ ++ tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); ++ ++ tw32_f(TG3_CPMU_EEE_CTRL, ++ TG3_CPMU_EEE_CTRL_EXIT_20_1_US); ++ ++ val = 
TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | ++ (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | ++ TG3_CPMU_EEEMD_LPI_IN_RX | ++ TG3_CPMU_EEEMD_EEE_ENABLE; ++ ++ if (tg3_asic_rev(tp) != ASIC_REV_5717) ++ val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; ++ ++ if (tg3_flag(tp, ENABLE_APE)) ++ val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; ++ ++ tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); ++ ++ tw32_f(TG3_CPMU_EEE_DBTMR1, ++ TG3_CPMU_DBTMR1_PCIEXIT_2047US | ++ (tp->eee.tx_lpi_timer & 0xffff)); ++ ++ tw32_f(TG3_CPMU_EEE_DBTMR2, ++ TG3_CPMU_DBTMR2_APE_TX_2047US | ++ TG3_CPMU_DBTMR2_TXIDXEQ_2047US); ++} ++ ++static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) ++{ ++ bool current_link_up; ++ u32 bmsr, val; ++ u32 lcl_adv, rmt_adv; ++ u16 current_speed; ++ u8 current_duplex; ++ int i, err; ++ ++ tg3_clear_mac_status(tp); ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) ++ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); ++ ++ /* Some third-party PHYs need to be reset on link going ++ * down. ++ */ ++ if ((tg3_asic_rev(tp) == ASIC_REV_5703 || ++ tg3_asic_rev(tp) == ASIC_REV_5704 || ++ tg3_asic_rev(tp) == ASIC_REV_5705) && ++ tp->link_up) { ++ tg3_readphy(tp, MII_BMSR, &bmsr); ++ if (!tg3_readphy(tp, MII_BMSR, &bmsr) && ++ !(bmsr & BMSR_LSTATUS)) ++ force_reset = true; ++ } ++ if (force_reset) ++ tg3_phy_reset(tp); ++ ++ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { ++ tg3_readphy(tp, MII_BMSR, &bmsr); ++ if (tg3_readphy(tp, MII_BMSR, &bmsr) || ++ !tg3_flag(tp, INIT_COMPLETE)) ++ bmsr = 0; ++ ++ if (!(bmsr & BMSR_LSTATUS)) { ++ err = tg3_init_5401phy_dsp(tp); ++ if (err) ++ return err; ++ ++ tg3_readphy(tp, MII_BMSR, &bmsr); ++ for (i = 0; i < 1000; i++) { ++ udelay(10); ++ if (!tg3_readphy(tp, MII_BMSR, &bmsr) && ++ (bmsr & BMSR_LSTATUS)) { ++ udelay(40); ++ break; ++ } ++ } ++ ++ if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == ++ TG3_PHY_REV_BCM5401_B0 && ++ !(bmsr & BMSR_LSTATUS) && ++ tp->link_config.active_speed == SPEED_1000) { ++ err = tg3_phy_reset(tp); ++ if (!err) ++ err = tg3_init_5401phy_dsp(tp); ++ if (err) ++ return err; ++ } ++ } ++ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) { ++ /* 5701 {A0,B0} CRC bug workaround */ ++ tg3_writephy(tp, 0x15, 0x0a75); ++ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); ++ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); ++ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); ++ } ++ ++ /* Clear pending interrupts... 
*/ ++ tg3_readphy(tp, MII_TG3_ISTAT, &val); ++ tg3_readphy(tp, MII_TG3_ISTAT, &val); ++ ++ if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) ++ tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); ++ else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) ++ tg3_writephy(tp, MII_TG3_IMASK, ~0); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5700 || ++ tg3_asic_rev(tp) == ASIC_REV_5701) { ++ if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) ++ tg3_writephy(tp, MII_TG3_EXT_CTRL, ++ MII_TG3_EXT_CTRL_LNK3_LED_MODE); ++ else ++ tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); ++ } ++ ++ current_link_up = false; ++ current_speed = SPEED_UNKNOWN; ++ current_duplex = DUPLEX_UNKNOWN; ++ tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; ++ tp->link_config.rmt_adv = 0; ++ ++ if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { ++ err = tg3_phy_auxctl_read(tp, ++ MII_TG3_AUXCTL_SHDWSEL_MISCTEST, ++ &val); ++ if (!err && !(val & (1 << 10))) { ++ tg3_phy_auxctl_write(tp, ++ MII_TG3_AUXCTL_SHDWSEL_MISCTEST, ++ val | (1 << 10)); ++ goto relink; ++ } ++ } ++ ++ bmsr = 0; ++ for (i = 0; i < 100; i++) { ++ tg3_readphy(tp, MII_BMSR, &bmsr); ++ if (!tg3_readphy(tp, MII_BMSR, &bmsr) && ++ (bmsr & BMSR_LSTATUS)) ++ break; ++ udelay(40); ++ } ++ ++ if (bmsr & BMSR_LSTATUS) { ++ u32 aux_stat, bmcr; ++ ++ tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); ++ for (i = 0; i < 2000; i++) { ++ udelay(10); ++ if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && ++ aux_stat) ++ break; ++ } ++ ++ tg3_aux_stat_to_speed_duplex(tp, aux_stat, ++ &current_speed, ++ &current_duplex); ++ ++ bmcr = 0; ++ for (i = 0; i < 200; i++) { ++ tg3_readphy(tp, MII_BMCR, &bmcr); ++ if (tg3_readphy(tp, MII_BMCR, &bmcr)) ++ continue; ++ if (bmcr && bmcr != 0x7fff) ++ break; ++ udelay(10); ++ } ++ ++ lcl_adv = 0; ++ rmt_adv = 0; ++ ++ tp->link_config.active_speed = current_speed; ++ tp->link_config.active_duplex = current_duplex; ++ ++ if (tp->link_config.autoneg == AUTONEG_ENABLE) { ++ bool eee_config_ok = tg3_phy_eee_config_ok(tp); ++ ++ if ((bmcr & BMCR_ANENABLE) && ++ eee_config_ok && ++ tg3_phy_copper_an_config_ok(tp, &lcl_adv) && ++ tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) ++ current_link_up = true; ++ ++ /* EEE settings changes take effect only after a phy ++ * reset. If we have skipped a reset due to Link Flap ++ * Avoidance being enabled, do it now. 
++ */ ++ if (!eee_config_ok && ++ (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && ++ !force_reset) { ++ tg3_setup_eee(tp); ++ tg3_phy_reset(tp); ++ } ++ } else { ++ if (!(bmcr & BMCR_ANENABLE) && ++ tp->link_config.speed == current_speed && ++ tp->link_config.duplex == current_duplex) { ++ current_link_up = true; ++ } ++ } ++ ++ if (current_link_up && ++ tp->link_config.active_duplex == DUPLEX_FULL) { ++ u32 reg, bit; ++ ++ if (tp->phy_flags & TG3_PHYFLG_IS_FET) { ++ reg = MII_TG3_FET_GEN_STAT; ++ bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; ++ } else { ++ reg = MII_TG3_EXT_STAT; ++ bit = MII_TG3_EXT_STAT_MDIX; ++ } ++ ++ if (!tg3_readphy(tp, reg, &val) && (val & bit)) ++ tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; ++ ++ tg3_setup_flow_control(tp, lcl_adv, rmt_adv); ++ } ++ } ++ ++relink: ++ if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { ++ tg3_phy_copper_begin(tp); ++ ++ if (tg3_flag(tp, ROBOSWITCH)) { ++ current_link_up = true; ++ /* FIXME: when BCM5325 switch is used use 100 MBit/s */ ++ current_speed = SPEED_1000; ++ current_duplex = DUPLEX_FULL; ++ tp->link_config.active_speed = current_speed; ++ tp->link_config.active_duplex = current_duplex; ++ } ++ ++ tg3_readphy(tp, MII_BMSR, &bmsr); ++ if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || ++ (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) ++ current_link_up = true; ++ } ++ ++ tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; ++ if (current_link_up) { ++ if (tp->link_config.active_speed == SPEED_100 || ++ tp->link_config.active_speed == SPEED_10) ++ tp->mac_mode |= MAC_MODE_PORT_MODE_MII; ++ else ++ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; ++ } else if ((tp->phy_flags & TG3_PHYFLG_IS_FET) || ++ tg3_asic_rev(tp) == ASIC_REV_5785) ++ tp->mac_mode |= MAC_MODE_PORT_MODE_MII; ++ else ++ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; ++ ++ /* In order for the 5750 core in BCM4785 chip to work properly ++ * in RGMII mode, the Led Control Register must be set up. ++ */ ++ if (tg3_flag(tp, RGMII_MODE)) { ++ u32 led_ctrl = tr32(MAC_LED_CTRL); ++ led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON); ++ ++ if (tp->link_config.active_speed == SPEED_10) ++ led_ctrl |= LED_CTRL_LNKLED_OVERRIDE; ++ else if (tp->link_config.active_speed == SPEED_100) ++ led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | ++ LED_CTRL_100MBPS_ON); ++ else if (tp->link_config.active_speed == SPEED_1000) ++ led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | ++ LED_CTRL_1000MBPS_ON); ++ ++ tw32(MAC_LED_CTRL, led_ctrl); ++ udelay(40); ++ } ++ ++ tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; ++ if (tp->link_config.active_duplex == DUPLEX_HALF) ++ tp->mac_mode |= MAC_MODE_HALF_DUPLEX; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5700) { ++ if (current_link_up && ++ tg3_5700_link_polarity(tp, tp->link_config.active_speed)) ++ tp->mac_mode |= MAC_MODE_LINK_POLARITY; ++ else ++ tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; ++ } ++ ++ /* ??? Without this setting Netgear GA302T PHY does not ++ * ??? send/receive packets... 
++ */ ++ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) { ++ tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; ++ tw32_f(MAC_MI_MODE, tp->mi_mode); ++ udelay(80); ++ } ++ ++ tw32_f(MAC_MODE, tp->mac_mode); ++ udelay(40); ++ ++ tg3_phy_eee_adjust(tp, current_link_up); ++ ++#ifndef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_asic_rev(tp) == ASIC_REV_5785) { ++ /* A0 */ ++ if (tp->phy_id == TG3_PHY_ID_BCM50612E && ++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) { ++ if (tp->link_config.active_speed == SPEED_10) ++ tg3_phydsp_write(tp, 0x0ff0, 0x2000); ++ else ++ tg3_phydsp_write(tp, 0x0ff0, 0x0000); ++ ++ tg3_phy_toggle_auxctl_smdsp(tp, false); ++ } ++ ++ if (tp->link_config.active_speed == SPEED_10) ++ tw32(MAC_MI_STAT, ++ MAC_MI_STAT_10MBPS_MODE | ++ MAC_MI_STAT_LNKSTAT_ATTN_ENAB); ++ else ++ tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); ++ } ++#endif ++ ++ if (tg3_flag(tp, USE_LINKCHG_REG)) { ++ /* Polled via timer. */ ++ tw32_f(MAC_EVENT, 0); ++ } else { ++ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); ++ } ++ udelay(40); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5700 && ++ current_link_up && ++ tp->link_config.active_speed == SPEED_1000 && ++ (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { ++ udelay(120); ++ tw32_f(MAC_STATUS, ++ (MAC_STATUS_SYNC_CHANGED | ++ MAC_STATUS_CFG_CHANGED)); ++ udelay(40); ++ tg3_write_mem(tp, ++ NIC_SRAM_FIRMWARE_MBOX, ++ NIC_SRAM_FIRMWARE_MBOX_MAGIC2); ++ } ++ ++ /* Prevent send BD corruption. */ ++ if (tg3_flag(tp, CLKREQ_BUG)) { ++ if (tp->link_config.active_speed == SPEED_100 || ++ tp->link_config.active_speed == SPEED_10) ++ pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL, ++ PCI_EXP_LNKCTL_CLKREQ_EN); ++ else ++ pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, ++ PCI_EXP_LNKCTL_CLKREQ_EN); ++ } ++ ++ tg3_test_and_report_link_chg(tp, current_link_up); ++ ++ return 0; ++} ++ ++struct tg3_fiber_aneginfo { ++ int state; ++#define ANEG_STATE_UNKNOWN 0 ++#define ANEG_STATE_AN_ENABLE 1 ++#define ANEG_STATE_RESTART_INIT 2 ++#define ANEG_STATE_RESTART 3 ++#define ANEG_STATE_DISABLE_LINK_OK 4 ++#define ANEG_STATE_ABILITY_DETECT_INIT 5 ++#define ANEG_STATE_ABILITY_DETECT 6 ++#define ANEG_STATE_ACK_DETECT_INIT 7 ++#define ANEG_STATE_ACK_DETECT 8 ++#define ANEG_STATE_COMPLETE_ACK_INIT 9 ++#define ANEG_STATE_COMPLETE_ACK 10 ++#define ANEG_STATE_IDLE_DETECT_INIT 11 ++#define ANEG_STATE_IDLE_DETECT 12 ++#define ANEG_STATE_LINK_OK 13 ++#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 ++#define ANEG_STATE_NEXT_PAGE_WAIT 15 ++ ++ u32 flags; ++#define MR_AN_ENABLE 0x00000001 ++#define MR_RESTART_AN 0x00000002 ++#define MR_AN_COMPLETE 0x00000004 ++#define MR_PAGE_RX 0x00000008 ++#define MR_NP_LOADED 0x00000010 ++#define MR_TOGGLE_TX 0x00000020 ++#define MR_LP_ADV_FULL_DUPLEX 0x00000040 ++#define MR_LP_ADV_HALF_DUPLEX 0x00000080 ++#define MR_LP_ADV_SYM_PAUSE 0x00000100 ++#define MR_LP_ADV_ASYM_PAUSE 0x00000200 ++#define MR_LP_ADV_REMOTE_FAULT1 0x00000400 ++#define MR_LP_ADV_REMOTE_FAULT2 0x00000800 ++#define MR_LP_ADV_NEXT_PAGE 0x00001000 ++#define MR_TOGGLE_RX 0x00002000 ++#define MR_NP_RX 0x00004000 ++ ++#define MR_LINK_OK 0x80000000 ++ ++ unsigned long link_time, cur_time; ++ ++ u32 ability_match_cfg; ++ int ability_match_count; ++ ++ char ability_match, idle_match, ack_match; ++ ++ u32 txconfig, rxconfig; ++#define ANEG_CFG_NP 0x00000080 ++#define ANEG_CFG_ACK 0x00000040 ++#define ANEG_CFG_RF2 0x00000020 ++#define ANEG_CFG_RF1 0x00000010 ++#define ANEG_CFG_PS2 0x00000001 ++#define ANEG_CFG_PS1 0x00008000 ++#define 
ANEG_CFG_HD 0x00004000 ++#define ANEG_CFG_FD 0x00002000 ++#define ANEG_CFG_INVAL 0x00001f06 ++ ++}; ++#define ANEG_OK 0 ++#define ANEG_DONE 1 ++#define ANEG_TIMER_ENAB 2 ++#define ANEG_FAILED -1 ++ ++#define ANEG_STATE_SETTLE_TIME 10000 ++ ++static int tg3_fiber_aneg_smachine(struct tg3 *tp, ++ struct tg3_fiber_aneginfo *ap) ++{ ++ u16 flowctrl; ++ unsigned long delta; ++ u32 rx_cfg_reg; ++ int ret; ++ ++ if (ap->state == ANEG_STATE_UNKNOWN) { ++ ap->rxconfig = 0; ++ ap->link_time = 0; ++ ap->cur_time = 0; ++ ap->ability_match_cfg = 0; ++ ap->ability_match_count = 0; ++ ap->ability_match = 0; ++ ap->idle_match = 0; ++ ap->ack_match = 0; ++ } ++ ap->cur_time++; ++ ++ if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { ++ rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); ++ ++ if (rx_cfg_reg != ap->ability_match_cfg) { ++ ap->ability_match_cfg = rx_cfg_reg; ++ ap->ability_match = 0; ++ ap->ability_match_count = 0; ++ } else { ++ if (++ap->ability_match_count > 1) { ++ ap->ability_match = 1; ++ ap->ability_match_cfg = rx_cfg_reg; ++ } ++ } ++ if (rx_cfg_reg & ANEG_CFG_ACK) ++ ap->ack_match = 1; ++ else ++ ap->ack_match = 0; ++ ++ ap->idle_match = 0; ++ } else { ++ ap->idle_match = 1; ++ ap->ability_match_cfg = 0; ++ ap->ability_match_count = 0; ++ ap->ability_match = 0; ++ ap->ack_match = 0; ++ ++ rx_cfg_reg = 0; ++ } ++ ++ ap->rxconfig = rx_cfg_reg; ++ ret = ANEG_OK; ++ ++ switch (ap->state) { ++ case ANEG_STATE_UNKNOWN: ++ if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) ++ ap->state = ANEG_STATE_AN_ENABLE; ++ ++ /* fallthru */ ++ case ANEG_STATE_AN_ENABLE: ++ ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); ++ if (ap->flags & MR_AN_ENABLE) { ++ ap->link_time = 0; ++ ap->cur_time = 0; ++ ap->ability_match_cfg = 0; ++ ap->ability_match_count = 0; ++ ap->ability_match = 0; ++ ap->idle_match = 0; ++ ap->ack_match = 0; ++ ++ ap->state = ANEG_STATE_RESTART_INIT; ++ } else { ++ ap->state = ANEG_STATE_DISABLE_LINK_OK; ++ } ++ break; ++ ++ case ANEG_STATE_RESTART_INIT: ++ ap->link_time = ap->cur_time; ++ ap->flags &= ~(MR_NP_LOADED); ++ ap->txconfig = 0; ++ tw32(MAC_TX_AUTO_NEG, 0); ++ tp->mac_mode |= MAC_MODE_SEND_CONFIGS; ++ tw32_f(MAC_MODE, tp->mac_mode); ++ udelay(40); ++ ++ ret = ANEG_TIMER_ENAB; ++ ap->state = ANEG_STATE_RESTART; ++ ++ /* fallthru */ ++ case ANEG_STATE_RESTART: ++ delta = ap->cur_time - ap->link_time; ++ if (delta > ANEG_STATE_SETTLE_TIME) ++ ap->state = ANEG_STATE_ABILITY_DETECT_INIT; ++ else ++ ret = ANEG_TIMER_ENAB; ++ break; ++ ++ case ANEG_STATE_DISABLE_LINK_OK: ++ ret = ANEG_DONE; ++ break; ++ ++ case ANEG_STATE_ABILITY_DETECT_INIT: ++ ap->flags &= ~(MR_TOGGLE_TX); ++ ap->txconfig = ANEG_CFG_FD; ++ flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); ++ if (flowctrl & ADVERTISE_1000XPAUSE) ++ ap->txconfig |= ANEG_CFG_PS1; ++ if (flowctrl & ADVERTISE_1000XPSE_ASYM) ++ ap->txconfig |= ANEG_CFG_PS2; ++ tw32(MAC_TX_AUTO_NEG, ap->txconfig); ++ tp->mac_mode |= MAC_MODE_SEND_CONFIGS; ++ tw32_f(MAC_MODE, tp->mac_mode); ++ udelay(40); ++ ++ ap->state = ANEG_STATE_ABILITY_DETECT; ++ break; ++ ++ case ANEG_STATE_ABILITY_DETECT: ++ if (ap->ability_match != 0 && ap->rxconfig != 0) ++ ap->state = ANEG_STATE_ACK_DETECT_INIT; ++ break; ++ ++ case ANEG_STATE_ACK_DETECT_INIT: ++ ap->txconfig |= ANEG_CFG_ACK; ++ tw32(MAC_TX_AUTO_NEG, ap->txconfig); ++ tp->mac_mode |= MAC_MODE_SEND_CONFIGS; ++ tw32_f(MAC_MODE, tp->mac_mode); ++ udelay(40); ++ ++ ap->state = ANEG_STATE_ACK_DETECT; ++ ++ /* fallthru */ ++ case ANEG_STATE_ACK_DETECT: ++ if (ap->ack_match != 0) { ++ if ((ap->rxconfig & ~ANEG_CFG_ACK) == ++ 
(ap->ability_match_cfg & ~ANEG_CFG_ACK)) { ++ ap->state = ANEG_STATE_COMPLETE_ACK_INIT; ++ } else { ++ ap->state = ANEG_STATE_AN_ENABLE; ++ } ++ } else if (ap->ability_match != 0 && ++ ap->rxconfig == 0) { ++ ap->state = ANEG_STATE_AN_ENABLE; ++ } ++ break; ++ ++ case ANEG_STATE_COMPLETE_ACK_INIT: ++ if (ap->rxconfig & ANEG_CFG_INVAL) { ++ ret = ANEG_FAILED; ++ break; ++ } ++ ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | ++ MR_LP_ADV_HALF_DUPLEX | ++ MR_LP_ADV_SYM_PAUSE | ++ MR_LP_ADV_ASYM_PAUSE | ++ MR_LP_ADV_REMOTE_FAULT1 | ++ MR_LP_ADV_REMOTE_FAULT2 | ++ MR_LP_ADV_NEXT_PAGE | ++ MR_TOGGLE_RX | ++ MR_NP_RX); ++ if (ap->rxconfig & ANEG_CFG_FD) ++ ap->flags |= MR_LP_ADV_FULL_DUPLEX; ++ if (ap->rxconfig & ANEG_CFG_HD) ++ ap->flags |= MR_LP_ADV_HALF_DUPLEX; ++ if (ap->rxconfig & ANEG_CFG_PS1) ++ ap->flags |= MR_LP_ADV_SYM_PAUSE; ++ if (ap->rxconfig & ANEG_CFG_PS2) ++ ap->flags |= MR_LP_ADV_ASYM_PAUSE; ++ if (ap->rxconfig & ANEG_CFG_RF1) ++ ap->flags |= MR_LP_ADV_REMOTE_FAULT1; ++ if (ap->rxconfig & ANEG_CFG_RF2) ++ ap->flags |= MR_LP_ADV_REMOTE_FAULT2; ++ if (ap->rxconfig & ANEG_CFG_NP) ++ ap->flags |= MR_LP_ADV_NEXT_PAGE; ++ ++ ap->link_time = ap->cur_time; ++ ++ ap->flags ^= (MR_TOGGLE_TX); ++ if (ap->rxconfig & 0x0008) ++ ap->flags |= MR_TOGGLE_RX; ++ if (ap->rxconfig & ANEG_CFG_NP) ++ ap->flags |= MR_NP_RX; ++ ap->flags |= MR_PAGE_RX; ++ ++ ap->state = ANEG_STATE_COMPLETE_ACK; ++ ret = ANEG_TIMER_ENAB; ++ break; ++ ++ case ANEG_STATE_COMPLETE_ACK: ++ if (ap->ability_match != 0 && ++ ap->rxconfig == 0) { ++ ap->state = ANEG_STATE_AN_ENABLE; ++ break; ++ } ++ delta = ap->cur_time - ap->link_time; ++ if (delta > ANEG_STATE_SETTLE_TIME) { ++ if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { ++ ap->state = ANEG_STATE_IDLE_DETECT_INIT; ++ } else { ++ if ((ap->txconfig & ANEG_CFG_NP) == 0 && ++ !(ap->flags & MR_NP_RX)) { ++ ap->state = ANEG_STATE_IDLE_DETECT_INIT; ++ } else { ++ ret = ANEG_FAILED; ++ } ++ } ++ } ++ break; ++ ++ case ANEG_STATE_IDLE_DETECT_INIT: ++ ap->link_time = ap->cur_time; ++ tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; ++ tw32_f(MAC_MODE, tp->mac_mode); ++ udelay(40); ++ ++ ap->state = ANEG_STATE_IDLE_DETECT; ++ ret = ANEG_TIMER_ENAB; ++ break; ++ ++ case ANEG_STATE_IDLE_DETECT: ++ if (ap->ability_match != 0 && ++ ap->rxconfig == 0) { ++ ap->state = ANEG_STATE_AN_ENABLE; ++ break; ++ } ++ delta = ap->cur_time - ap->link_time; ++ if (delta > ANEG_STATE_SETTLE_TIME) { ++ /* XXX another gem from the Broadcom driver :( */ ++ ap->state = ANEG_STATE_LINK_OK; ++ } ++ break; ++ ++ case ANEG_STATE_LINK_OK: ++ ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); ++ ret = ANEG_DONE; ++ break; ++ ++ case ANEG_STATE_NEXT_PAGE_WAIT_INIT: ++ /* ??? unimplemented */ ++ break; ++ ++ case ANEG_STATE_NEXT_PAGE_WAIT: ++ /* ??? 
unimplemented */ ++ break; ++ ++ default: ++ ret = ANEG_FAILED; ++ break; ++ } ++ ++ return ret; ++} ++ ++static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) ++{ ++ int res = 0; ++ struct tg3_fiber_aneginfo aninfo; ++ int status = ANEG_FAILED; ++ unsigned int tick; ++ u32 tmp; ++ ++ tw32_f(MAC_TX_AUTO_NEG, 0); ++ ++ tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; ++ tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); ++ udelay(40); ++ ++ tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); ++ udelay(40); ++ ++ memset(&aninfo, 0, sizeof(aninfo)); ++ aninfo.flags |= MR_AN_ENABLE; ++ aninfo.state = ANEG_STATE_UNKNOWN; ++ aninfo.cur_time = 0; ++ tick = 0; ++ while (++tick < 195000) { ++ status = tg3_fiber_aneg_smachine(tp, &aninfo); ++ if (status == ANEG_DONE || status == ANEG_FAILED) ++ break; ++ ++ udelay(1); ++ } ++ ++ tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; ++ tw32_f(MAC_MODE, tp->mac_mode); ++ udelay(40); ++ ++ *txflags = aninfo.txconfig; ++ *rxflags = aninfo.flags; ++ ++ if (status == ANEG_DONE && ++ (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | ++ MR_LP_ADV_FULL_DUPLEX))) ++ res = 1; ++ ++ return res; ++} ++ ++static void tg3_init_bcm8002(struct tg3 *tp) ++{ ++ u32 mac_status = tr32(MAC_STATUS); ++ int i; ++ ++ /* Reset when initting first time or we have a link. */ ++ if (tg3_flag(tp, INIT_COMPLETE) && ++ !(mac_status & MAC_STATUS_PCS_SYNCED)) ++ return; ++ ++ /* Set PLL lock range. */ ++ tg3_writephy(tp, 0x16, 0x8007); ++ ++ /* SW reset */ ++ tg3_writephy(tp, MII_BMCR, BMCR_RESET); ++ ++ /* Wait for reset to complete. */ ++ /* XXX schedule_timeout() ... */ ++ for (i = 0; i < 500; i++) ++ udelay(10); ++ ++ /* Config mode; select PMA/Ch 1 regs. */ ++ tg3_writephy(tp, 0x10, 0x8411); ++ ++ /* Enable auto-lock and comdet, select txclk for tx. */ ++ tg3_writephy(tp, 0x11, 0x0a10); ++ ++ tg3_writephy(tp, 0x18, 0x00a0); ++ tg3_writephy(tp, 0x16, 0x41ff); ++ ++ /* Assert and deassert POR. */ ++ tg3_writephy(tp, 0x13, 0x0400); ++ udelay(40); ++ tg3_writephy(tp, 0x13, 0x0000); ++ ++ tg3_writephy(tp, 0x11, 0x0a50); ++ udelay(40); ++ tg3_writephy(tp, 0x11, 0x0a10); ++ ++ /* Wait for signal to stabilize */ ++ /* XXX schedule_timeout() ... */ ++ for (i = 0; i < 15000; i++) ++ udelay(10); ++ ++ /* Deselect the channel register so we can read the PHYID ++ * later. 
++ */ ++ tg3_writephy(tp, 0x10, 0x8011); ++} ++ ++static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) ++{ ++ u16 flowctrl; ++ bool current_link_up; ++ u32 sg_dig_ctrl, sg_dig_status; ++ u32 serdes_cfg, expected_sg_dig_ctrl; ++ int workaround, port_a; ++ ++ serdes_cfg = 0; ++ expected_sg_dig_ctrl = 0; ++ workaround = 0; ++ port_a = 1; ++ current_link_up = false; ++ ++ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 && ++ tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) { ++ workaround = 1; ++ if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) ++ port_a = 0; ++ ++ /* preserve bits 0-11,13,14 for signal pre-emphasis */ ++ /* preserve bits 20-23 for voltage regulator */ ++ serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; ++ } ++ ++ sg_dig_ctrl = tr32(SG_DIG_CTRL); ++ ++ if (tp->link_config.autoneg != AUTONEG_ENABLE) { ++ if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { ++ if (workaround) { ++ u32 val = serdes_cfg; ++ ++ if (port_a) ++ val |= 0xc010000; ++ else ++ val |= 0x4010000; ++ tw32_f(MAC_SERDES_CFG, val); ++ } ++ ++ tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); ++ } ++ if (mac_status & MAC_STATUS_PCS_SYNCED) { ++ tg3_setup_flow_control(tp, 0, 0); ++ current_link_up = true; ++ } ++ goto out; ++ } ++ ++ /* Want auto-negotiation. */ ++ expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; ++ ++ flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); ++ if (flowctrl & ADVERTISE_1000XPAUSE) ++ expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; ++ if (flowctrl & ADVERTISE_1000XPSE_ASYM) ++ expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; ++ ++ if (sg_dig_ctrl != expected_sg_dig_ctrl) { ++ if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && ++ tp->serdes_counter && ++ ((mac_status & (MAC_STATUS_PCS_SYNCED | ++ MAC_STATUS_RCVD_CFG)) == ++ MAC_STATUS_PCS_SYNCED)) { ++ tp->serdes_counter--; ++ current_link_up = true; ++ goto out; ++ } ++restart_autoneg: ++ if (workaround) ++ tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); ++ tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); ++ udelay(5); ++ tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); ++ ++ tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; ++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; ++ } else if (mac_status & (MAC_STATUS_PCS_SYNCED | ++ MAC_STATUS_SIGNAL_DET)) { ++ sg_dig_status = tr32(SG_DIG_STATUS); ++ mac_status = tr32(MAC_STATUS); ++ ++ if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && ++ (mac_status & MAC_STATUS_PCS_SYNCED)) { ++ u32 local_adv = 0, remote_adv = 0; ++ ++ if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) ++ local_adv |= ADVERTISE_1000XPAUSE; ++ if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) ++ local_adv |= ADVERTISE_1000XPSE_ASYM; ++ ++ if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) ++ remote_adv |= LPA_1000XPAUSE; ++ if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) ++ remote_adv |= LPA_1000XPAUSE_ASYM; ++ ++ tp->link_config.rmt_adv = ++ mii_adv_to_ethtool_adv_x(remote_adv); ++ ++ tg3_setup_flow_control(tp, local_adv, remote_adv); ++ current_link_up = true; ++ tp->serdes_counter = 0; ++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; ++ } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { ++ if (tp->serdes_counter) ++ tp->serdes_counter--; ++ else { ++ if (workaround) { ++ u32 val = serdes_cfg; ++ ++ if (port_a) ++ val |= 0xc010000; ++ else ++ val |= 0x4010000; ++ ++ tw32_f(MAC_SERDES_CFG, val); ++ } ++ ++ tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); ++ udelay(40); ++ ++ /* Link parallel detection - link is up */ ++ /* only if we have PCS_SYNC and not */ ++ /* receiving config code words */ ++ mac_status = tr32(MAC_STATUS); ++ 
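/* Declare link via parallel detect only when we are synced and no ++ * config words are arriving; anything else restarts autoneg below. ++ */ ++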
if ((mac_status & MAC_STATUS_PCS_SYNCED) && ++ !(mac_status & MAC_STATUS_RCVD_CFG)) { ++ tg3_setup_flow_control(tp, 0, 0); ++ current_link_up = true; ++ tp->phy_flags |= ++ TG3_PHYFLG_PARALLEL_DETECT; ++ tp->serdes_counter = ++ SERDES_PARALLEL_DET_TIMEOUT; ++ } else ++ goto restart_autoneg; ++ } ++ } ++ } else { ++ tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; ++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; ++ } ++ ++out: ++ return current_link_up; ++} ++ ++static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) ++{ ++ bool current_link_up = false; ++ ++ if (!(mac_status & MAC_STATUS_PCS_SYNCED)) ++ goto out; ++ ++ if (tp->link_config.autoneg == AUTONEG_ENABLE) { ++ u32 txflags, rxflags; ++ int i; ++ ++ if (fiber_autoneg(tp, &txflags, &rxflags)) { ++ u32 local_adv = 0, remote_adv = 0; ++ ++ if (txflags & ANEG_CFG_PS1) ++ local_adv |= ADVERTISE_1000XPAUSE; ++ if (txflags & ANEG_CFG_PS2) ++ local_adv |= ADVERTISE_1000XPSE_ASYM; ++ ++ if (rxflags & MR_LP_ADV_SYM_PAUSE) ++ remote_adv |= LPA_1000XPAUSE; ++ if (rxflags & MR_LP_ADV_ASYM_PAUSE) ++ remote_adv |= LPA_1000XPAUSE_ASYM; ++ ++ tp->link_config.rmt_adv = ++ mii_adv_to_ethtool_adv_x(remote_adv); ++ ++ tg3_setup_flow_control(tp, local_adv, remote_adv); ++ ++ current_link_up = true; ++ } ++ for (i = 0; i < 30; i++) { ++ udelay(20); ++ tw32_f(MAC_STATUS, ++ (MAC_STATUS_SYNC_CHANGED | ++ MAC_STATUS_CFG_CHANGED)); ++ udelay(40); ++ if ((tr32(MAC_STATUS) & ++ (MAC_STATUS_SYNC_CHANGED | ++ MAC_STATUS_CFG_CHANGED)) == 0) ++ break; ++ } ++ ++ mac_status = tr32(MAC_STATUS); ++ if (!current_link_up && ++ (mac_status & MAC_STATUS_PCS_SYNCED) && ++ !(mac_status & MAC_STATUS_RCVD_CFG)) ++ current_link_up = true; ++ } else { ++ tg3_setup_flow_control(tp, 0, 0); ++ ++ /* Forcing 1000FD link up. */ ++ current_link_up = true; ++ ++ tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); ++ udelay(40); ++ ++ tw32_f(MAC_MODE, tp->mac_mode); ++ udelay(40); ++ } ++ ++out: ++ return current_link_up; ++} ++ ++static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) ++{ ++ u32 orig_pause_cfg; ++ u16 orig_active_speed; ++ u8 orig_active_duplex; ++ u32 mac_status; ++ bool current_link_up; ++ int i; ++ ++ orig_pause_cfg = tp->link_config.active_flowctrl; ++ orig_active_speed = tp->link_config.active_speed; ++ orig_active_duplex = tp->link_config.active_duplex; ++ ++ if (!tg3_flag(tp, HW_AUTONEG) && ++ tp->link_up && ++ tg3_flag(tp, INIT_COMPLETE)) { ++ mac_status = tr32(MAC_STATUS); ++ mac_status &= (MAC_STATUS_PCS_SYNCED | ++ MAC_STATUS_SIGNAL_DET | ++ MAC_STATUS_CFG_CHANGED | ++ MAC_STATUS_RCVD_CFG); ++ if (mac_status == (MAC_STATUS_PCS_SYNCED | ++ MAC_STATUS_SIGNAL_DET)) { ++ tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | ++ MAC_STATUS_CFG_CHANGED)); ++ return 0; ++ } ++ } ++ ++ tw32_f(MAC_TX_AUTO_NEG, 0); ++ ++ tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); ++ tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; ++ tw32_f(MAC_MODE, tp->mac_mode); ++ udelay(40); ++ ++ if (tp->phy_id == TG3_PHY_ID_BCM8002) ++ tg3_init_bcm8002(tp); ++ ++ /* Enable link change event even when serdes polling. 
*/ ++ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); ++ udelay(40); ++ ++ current_link_up = false; ++ tp->link_config.rmt_adv = 0; ++ mac_status = tr32(MAC_STATUS); ++ ++ if (tg3_flag(tp, HW_AUTONEG)) ++ current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); ++ else ++ current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); ++ ++ tp->napi[0].hw_status->status = ++ (SD_STATUS_UPDATED | ++ (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); ++ ++ for (i = 0; i < 100; i++) { ++ tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | ++ MAC_STATUS_CFG_CHANGED)); ++ udelay(5); ++ if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | ++ MAC_STATUS_CFG_CHANGED | ++ MAC_STATUS_LNKSTATE_CHANGED)) == 0) ++ break; ++ } ++ ++ mac_status = tr32(MAC_STATUS); ++ if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { ++ current_link_up = false; ++ if (tp->link_config.autoneg == AUTONEG_ENABLE && ++ tp->serdes_counter == 0) { ++ tw32_f(MAC_MODE, (tp->mac_mode | ++ MAC_MODE_SEND_CONFIGS)); ++ udelay(1); ++ tw32_f(MAC_MODE, tp->mac_mode); ++ } ++ } ++ ++ if (current_link_up) { ++ tp->link_config.active_speed = SPEED_1000; ++ tp->link_config.active_duplex = DUPLEX_FULL; ++ tw32(MAC_LED_CTRL, (tp->led_ctrl | ++ LED_CTRL_LNKLED_OVERRIDE | ++ LED_CTRL_1000MBPS_ON)); ++ } else { ++ tp->link_config.active_speed = SPEED_UNKNOWN; ++ tp->link_config.active_duplex = DUPLEX_UNKNOWN; ++ tw32(MAC_LED_CTRL, (tp->led_ctrl | ++ LED_CTRL_LNKLED_OVERRIDE | ++ LED_CTRL_TRAFFIC_OVERRIDE)); ++ } ++ ++ if (!tg3_test_and_report_link_chg(tp, current_link_up)) { ++ u32 now_pause_cfg = tp->link_config.active_flowctrl; ++ if (orig_pause_cfg != now_pause_cfg || ++ orig_active_speed != tp->link_config.active_speed || ++ orig_active_duplex != tp->link_config.active_duplex) ++ tg3_link_report(tp); ++ } ++ ++ return 0; ++} ++ ++static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) ++{ ++ int err = 0; ++ u32 bmsr, bmcr; ++ u16 current_speed = SPEED_UNKNOWN; ++ u8 current_duplex = DUPLEX_UNKNOWN; ++ bool current_link_up = false; ++ u32 local_adv, remote_adv, sgsr; ++ ++ if ((tg3_asic_rev(tp) == ASIC_REV_5719 || ++ tg3_asic_rev(tp) == ASIC_REV_5720) && ++ !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) && ++ (sgsr & SERDES_TG3_SGMII_MODE)) { ++ ++ if (force_reset) ++ tg3_phy_reset(tp); ++ ++ tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; ++ ++ if (!(sgsr & SERDES_TG3_LINK_UP)) { ++ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; ++ } else { ++ current_link_up = true; ++ if (sgsr & SERDES_TG3_SPEED_1000) { ++ current_speed = SPEED_1000; ++ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; ++ } else if (sgsr & SERDES_TG3_SPEED_100) { ++ current_speed = SPEED_100; ++ tp->mac_mode |= MAC_MODE_PORT_MODE_MII; ++ } else { ++ current_speed = SPEED_10; ++ tp->mac_mode |= MAC_MODE_PORT_MODE_MII; ++ } ++ ++ if (sgsr & SERDES_TG3_FULL_DUPLEX) ++ current_duplex = DUPLEX_FULL; ++ else ++ current_duplex = DUPLEX_HALF; ++ } ++ ++ tw32_f(MAC_MODE, tp->mac_mode); ++ udelay(40); ++ ++ tg3_clear_mac_status(tp); ++ ++ goto fiber_setup_done; ++ } ++ ++ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; ++ tw32_f(MAC_MODE, tp->mac_mode); ++ udelay(40); ++ ++ tg3_clear_mac_status(tp); ++ ++ if (force_reset) ++ tg3_phy_reset(tp); ++ ++ tp->link_config.rmt_adv = 0; ++ ++ err |= tg3_readphy(tp, MII_BMSR, &bmsr); ++ err |= tg3_readphy(tp, MII_BMSR, &bmsr); ++ if (tg3_asic_rev(tp) == ASIC_REV_5714) { ++ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) ++ bmsr |= BMSR_LSTATUS; ++ else ++ bmsr &= ~BMSR_LSTATUS; ++ } ++ ++ err |= tg3_readphy(tp, MII_BMCR, &bmcr); ++ ++ if ((tp->link_config.autoneg 
== AUTONEG_ENABLE) && !force_reset && ++ (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { ++ /* do nothing, just check for link up at the end */ ++ } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { ++ u32 adv, newadv; ++ ++ err |= tg3_readphy(tp, MII_ADVERTISE, &adv); ++ newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | ++ ADVERTISE_1000XPAUSE | ++ ADVERTISE_1000XPSE_ASYM | ++ ADVERTISE_SLCT); ++ ++ newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); ++ newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); ++ ++ if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { ++ tg3_writephy(tp, MII_ADVERTISE, newadv); ++ bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; ++ tg3_writephy(tp, MII_BMCR, bmcr); ++ ++ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); ++ tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; ++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; ++ ++ return err; ++ } ++ } else { ++ u32 new_bmcr; ++ ++ bmcr &= ~BMCR_SPEED1000; ++ new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); ++ ++ if (tp->link_config.duplex == DUPLEX_FULL) ++ new_bmcr |= BMCR_FULLDPLX; ++ ++ if (new_bmcr != bmcr) { ++ /* BMCR_SPEED1000 is a reserved bit that needs ++ * to be set on write. ++ */ ++ new_bmcr |= BMCR_SPEED1000; ++ ++ /* Force a linkdown */ ++ if (tp->link_up) { ++ u32 adv; ++ ++ err |= tg3_readphy(tp, MII_ADVERTISE, &adv); ++ adv &= ~(ADVERTISE_1000XFULL | ++ ADVERTISE_1000XHALF | ++ ADVERTISE_SLCT); ++ tg3_writephy(tp, MII_ADVERTISE, adv); ++ tg3_writephy(tp, MII_BMCR, bmcr | ++ BMCR_ANRESTART | ++ BMCR_ANENABLE); ++ udelay(10); ++ tg3_carrier_off(tp); ++ } ++ tg3_writephy(tp, MII_BMCR, new_bmcr); ++ bmcr = new_bmcr; ++ err |= tg3_readphy(tp, MII_BMSR, &bmsr); ++ err |= tg3_readphy(tp, MII_BMSR, &bmsr); ++ if (tg3_asic_rev(tp) == ASIC_REV_5714) { ++ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) ++ bmsr |= BMSR_LSTATUS; ++ else ++ bmsr &= ~BMSR_LSTATUS; ++ } ++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; ++ } ++ } ++ ++ if (bmsr & BMSR_LSTATUS) { ++ current_speed = SPEED_1000; ++ current_link_up = true; ++ if (bmcr & BMCR_FULLDPLX) ++ current_duplex = DUPLEX_FULL; ++ else ++ current_duplex = DUPLEX_HALF; ++ ++ local_adv = 0; ++ remote_adv = 0; ++ ++ if (bmcr & BMCR_ANENABLE) { ++ u32 common; ++ ++ err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); ++ err |= tg3_readphy(tp, MII_LPA, &remote_adv); ++ common = local_adv & remote_adv; ++ if (common & (ADVERTISE_1000XHALF | ++ ADVERTISE_1000XFULL)) { ++ if (common & ADVERTISE_1000XFULL) ++ current_duplex = DUPLEX_FULL; ++ else ++ current_duplex = DUPLEX_HALF; ++ ++ tp->link_config.rmt_adv = ++ mii_adv_to_ethtool_adv_x(remote_adv); ++ } else if (!tg3_flag(tp, 5780_CLASS)) { ++ /* Link is up via parallel detect */ ++ } else { ++ current_link_up = false; ++ } ++ } ++ } ++ ++fiber_setup_done: ++ if (current_link_up && current_duplex == DUPLEX_FULL) ++ tg3_setup_flow_control(tp, local_adv, remote_adv); ++ ++ tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; ++ if (tp->link_config.active_duplex == DUPLEX_HALF) ++ tp->mac_mode |= MAC_MODE_HALF_DUPLEX; ++ ++ tw32_f(MAC_MODE, tp->mac_mode); ++ udelay(40); ++ ++ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); ++ ++ tp->link_config.active_speed = current_speed; ++ tp->link_config.active_duplex = current_duplex; ++ ++ tg3_test_and_report_link_chg(tp, current_link_up); ++ return err; ++} ++ ++static void tg3_serdes_parallel_detect(struct tg3 *tp) ++{ ++ if (tp->serdes_counter) { ++ /* Give autoneg time to complete. 
*/ ++ tp->serdes_counter--; ++ return; ++ } ++ ++ if (!tp->link_up && ++ (tp->link_config.autoneg == AUTONEG_ENABLE)) { ++ u32 bmcr; ++ ++ tg3_readphy(tp, MII_BMCR, &bmcr); ++ if (bmcr & BMCR_ANENABLE) { ++ u32 phy1, phy2; ++ ++ /* Select shadow register 0x1f */ ++ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); ++ tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); ++ ++ /* Select expansion interrupt status register */ ++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, ++ MII_TG3_DSP_EXP1_INT_STAT); ++ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); ++ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); ++ ++ if ((phy1 & 0x10) && !(phy2 & 0x20)) { ++ /* We have signal detect and not receiving ++ * config code words, link is up by parallel ++ * detection. ++ */ ++ ++ bmcr &= ~BMCR_ANENABLE; ++ bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; ++ tg3_writephy(tp, MII_BMCR, bmcr); ++ tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; ++ } ++ } ++ } else if (tp->link_up && ++ (tp->link_config.autoneg == AUTONEG_ENABLE) && ++ (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { ++ u32 phy2; ++ ++ /* Select expansion interrupt status register */ ++ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, ++ MII_TG3_DSP_EXP1_INT_STAT); ++ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); ++ if (phy2 & 0x20) { ++ u32 bmcr; ++ ++ /* Config code words received, turn on autoneg. */ ++ tg3_readphy(tp, MII_BMCR, &bmcr); ++ tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); ++ ++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; ++ ++ } ++ } ++} ++ ++static int tg3_setup_phy(struct tg3 *tp, bool force_reset) ++{ ++ u32 val; ++ int err; ++ ++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ++ err = tg3_setup_fiber_phy(tp, force_reset); ++ else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) ++ err = tg3_setup_fiber_mii_phy(tp, force_reset); ++ else ++ err = tg3_setup_copper_phy(tp, force_reset); ++ ++ if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { ++ u32 scale; ++ ++ val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; ++ if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) ++ scale = 65; ++ else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) ++ scale = 6; ++ else ++ scale = 12; ++ ++ val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; ++ val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); ++ tw32(GRC_MISC_CFG, val); ++ } ++ ++ val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | ++ (6 << TX_LENGTHS_IPG_SHIFT); ++ if (tg3_asic_rev(tp) == ASIC_REV_5720 || ++ tg3_asic_rev(tp) == ASIC_REV_5762) ++ val |= tr32(MAC_TX_LENGTHS) & ++ (TX_LENGTHS_JMB_FRM_LEN_MSK | ++ TX_LENGTHS_CNT_DWN_VAL_MSK); ++ ++ if (tp->link_config.active_speed == SPEED_1000 && ++ tp->link_config.active_duplex == DUPLEX_HALF) ++ tw32(MAC_TX_LENGTHS, val | ++ (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); ++ else ++ tw32(MAC_TX_LENGTHS, val | ++ (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); ++ ++ if (!tg3_flag(tp, 5705_PLUS)) { ++ if (tp->link_up) { ++ tw32(HOSTCC_STAT_COAL_TICKS, ++ tp->coal.stats_block_coalesce_usecs); ++ } else { ++ tw32(HOSTCC_STAT_COAL_TICKS, 0); ++ } ++ } ++ ++ if (tg3_flag(tp, ASPM_WORKAROUND)) { ++ val = tr32(PCIE_PWR_MGMT_THRESH); ++ if (!tp->link_up) ++ val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | ++ tp->pwrmgmt_thresh; ++ else ++ val |= PCIE_PWR_MGMT_L1_THRESH_MSK; ++ tw32(PCIE_PWR_MGMT_THRESH, val); ++ } ++ ++ return err; ++} ++ ++#ifdef BCM_HAS_IEEE1588_SUPPORT ++/* tp->lock must be held */ ++static u64 tg3_refclk_read(struct tg3 *tp) ++{ ++ u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB); ++ return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; ++} ++ ++/* tp->lock must be held */ ++static void tg3_refclk_write(struct tg3 *tp, u64 newval) ++{ ++ 
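/* Stop the EAV reference clock while the 64-bit counter is reloaded ++ * in two 32-bit halves, then resume counting from the new value. ++ */ ++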
u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); ++ ++ tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP); ++ tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); ++ tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); ++ tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME); ++} ++ ++static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); ++static inline void tg3_full_unlock(struct tg3 *tp); ++#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) ++#ifdef ETHTOOL_GET_TS_INFO ++static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | ++ SOF_TIMESTAMPING_RX_SOFTWARE | ++ SOF_TIMESTAMPING_SOFTWARE; ++ ++ if (tg3_flag(tp, PTP_CAPABLE)) { ++ info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | ++ SOF_TIMESTAMPING_RX_HARDWARE | ++ SOF_TIMESTAMPING_RAW_HARDWARE; ++ } ++ ++ if (tp->ptp_clock) ++ info->phc_index = ptp_clock_index(tp->ptp_clock); ++ else ++ info->phc_index = -1; ++ ++ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); ++ ++ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | ++ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | ++ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | ++ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); ++ return 0; ++} ++#endif ++ ++static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) ++{ ++ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); ++ bool neg_adj = false; ++ u32 correction = 0; ++ ++ if (ppb < 0) { ++ neg_adj = true; ++ ppb = -ppb; ++ } ++ ++ /* Frequency adjustment is performed using hardware with a 24 bit ++ * accumulator and a programmable correction value. On each clk, the ++ * correction value gets added to the accumulator and when it ++ * overflows, the time counter is incremented/decremented. ++ * ++ * So conversion from ppb to correction value is ++ * ppb * (1 << 24) / 1000000000 ++ */ ++ correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) & ++ TG3_EAV_REF_CLK_CORRECT_MASK; ++ ++ tg3_full_lock(tp, 0); ++ ++ if (correction) ++ tw32(TG3_EAV_REF_CLK_CORRECT_CTL, ++ TG3_EAV_REF_CLK_CORRECT_EN | ++ (neg_adj ? 
TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction); ++ else ++ tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0); ++ ++ tg3_full_unlock(tp); ++ ++ return 0; ++} ++ ++static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) ++{ ++ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); ++ ++ tg3_full_lock(tp, 0); ++ tp->ptp_adjust += delta; ++ tg3_full_unlock(tp); ++ ++ return 0; ++} ++ ++static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) ++{ ++ u64 ns; ++ u32 remainder; ++ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); ++ ++ tg3_full_lock(tp, 0); ++ ns = tg3_refclk_read(tp); ++ ns += tp->ptp_adjust; ++ tg3_full_unlock(tp); ++ ++ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); ++ ts->tv_nsec = remainder; ++ ++ return 0; ++} ++ ++static int tg3_ptp_settime(struct ptp_clock_info *ptp, ++ const struct timespec *ts) ++{ ++ u64 ns; ++ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); ++ ++ ns = timespec_to_ns(ts); ++ ++ tg3_full_lock(tp, 0); ++ tg3_refclk_write(tp, ns); ++ tp->ptp_adjust = 0; ++ tg3_full_unlock(tp); ++ ++ return 0; ++} ++ ++static int tg3_ptp_enable(struct ptp_clock_info *ptp, ++ struct ptp_clock_request *rq, int on) ++{ ++ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); ++ u32 clock_ctl; ++ int rval = 0; ++ ++ switch (rq->type) { ++ case PTP_CLK_REQ_PEROUT: ++ if (rq->perout.index != 0) ++ return -EINVAL; ++ ++ tg3_full_lock(tp, 0); ++ clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); ++ clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK; ++ ++ if (on) { ++ u64 nsec; ++ ++ nsec = rq->perout.start.sec * 1000000000ULL + ++ rq->perout.start.nsec; ++ ++ if (rq->perout.period.sec || rq->perout.period.nsec) { ++ netdev_warn(tp->dev, ++ "Device supports only a one-shot timesync output, period must be 0\n"); ++ rval = -EINVAL; ++ goto err_out; ++ } ++ ++ if (nsec & (1ULL << 63)) { ++ netdev_warn(tp->dev, ++ "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n"); ++ rval = -EINVAL; ++ goto err_out; ++ } ++ ++ tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff)); ++ tw32(TG3_EAV_WATCHDOG0_MSB, ++ TG3_EAV_WATCHDOG0_EN | ++ ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK)); ++ ++ tw32(TG3_EAV_REF_CLCK_CTL, ++ clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0); ++ } else { ++ tw32(TG3_EAV_WATCHDOG0_MSB, 0); ++ tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl); ++ } ++ ++err_out: ++ tg3_full_unlock(tp); ++ return rval; ++ ++ default: ++ break; ++ } ++ ++ return -EOPNOTSUPP; ++} ++ ++static const struct ptp_clock_info tg3_ptp_caps = { ++ .owner = THIS_MODULE, ++ .name = "tg3 clock", ++ .max_adj = 250000000, ++ .n_alarm = 0, ++ .n_ext_ts = 0, ++ .n_per_out = 1, ++ .pps = 0, ++ .adjfreq = tg3_ptp_adjfreq, ++ .adjtime = tg3_ptp_adjtime, ++ .gettime = tg3_ptp_gettime, ++ .settime = tg3_ptp_settime, ++ .enable = tg3_ptp_enable, ++}; ++ ++static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, ++ struct skb_shared_hwtstamps *timestamp) ++{ ++ memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); ++ timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + ++ tp->ptp_adjust); ++} ++ ++/* tp->lock must be held */ ++static void tg3_ptp_init(struct tg3 *tp) ++{ ++ if (!tg3_flag(tp, PTP_CAPABLE)) ++ return; ++ ++ /* Initialize the hardware clock to the system time. 
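Subsequent phase corrections accumulate in tp->ptp_adjust instead of rewriting the hardware counter.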
*/ ++ tg3_refclk_write(tp, ktime_to_ns(ktime_get_real())); ++ tp->ptp_adjust = 0; ++ tp->ptp_info = tg3_ptp_caps; ++} ++ ++/* tp->lock must be held */ ++static void tg3_ptp_resume(struct tg3 *tp) ++{ ++ if (!tg3_flag(tp, PTP_CAPABLE)) ++ return; ++ ++ tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust); ++ tp->ptp_adjust = 0; ++} ++ ++static void tg3_ptp_fini(struct tg3 *tp) ++{ ++ if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock) ++ return; ++ ++ ptp_clock_unregister(tp->ptp_clock); ++ tp->ptp_clock = NULL; ++ tp->ptp_adjust = 0; ++} ++ ++#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ ++ ++static cycle_t tg3_timecntr_read_clock(const struct cyclecounter *tc) ++{ ++ struct tg3 *tp = container_of(tc, struct tg3, cycles); ++ return tg3_refclk_read(tp); ++} ++ ++static void tg3_ptp_calibrate(struct tg3 *tp) ++{ ++ struct timespec now; ++ ++ getnstimeofday(&now); ++ tg3_refclk_write(tp, timespec_to_ns(&now)); ++ ++ /* Synchronize our NIC clock against system wall clock. */ ++ memset(&tp->cycles, 0, sizeof(tp->cycles)); ++ tp->cycles.read = tg3_timecntr_read_clock; ++ tp->cycles.mask = CLOCKSOURCE_MASK(64); ++ tp->cycles.mult = 1; ++ ++ timecounter_init(&tp->clock, ++ &tp->cycles, ++ ktime_to_ns(ktime_get_real())); ++ ++ memset(&tp->compare, 0, sizeof(tp->compare)); ++ tp->compare.source = &tp->clock; ++ tp->compare.target = ktime_get_real; ++ tp->compare.num_samples = 10; ++ timecompare_update(&tp->compare, 0); ++} ++ ++static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, ++ struct skb_shared_hwtstamps *timestamp) ++{ ++ u64 ns = timecounter_cyc2time(&tp->clock, hwclock & TG3_TSTAMP_MASK); ++ timecompare_update(&tp->compare, ns); ++ ++ memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); ++ timestamp->hwtstamp = ns_to_ktime(ns); ++ timestamp->syststamp = timecompare_transform(&tp->compare, ns); ++} ++ ++static void tg3_ptp_init(struct tg3 *tp) ++{ ++ if (!tg3_flag(tp, PTP_CAPABLE)) ++ return; ++ ++ tg3_ptp_calibrate(tp); ++} ++ ++static void tg3_ptp_resume(struct tg3 *tp) ++{ ++ if (!tg3_flag(tp, PTP_CAPABLE)) ++ return; ++ ++ tg3_ptp_calibrate(tp); ++} ++ ++static void tg3_ptp_fini(struct tg3 *tp) ++{ ++} ++#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ ++ ++#else /* BCM_HAS_IEEE1588_SUPPORT */ ++#define tg3_ptp_init(tp) ++#define tg3_ptp_resume(tp) ++#define tg3_ptp_fini(tp) ++#endif /* BCM_HAS_IEEE1588_SUPPORT */ ++ ++static inline int tg3_irq_sync(struct tg3 *tp) ++{ ++ return tp->irq_sync; ++} ++ ++static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) ++{ ++ int i; ++ ++ dst = (u32 *)((u8 *)dst + off); ++ for (i = 0; i < len; i += sizeof(u32)) ++ *dst++ = tr32(off + i); ++} ++ ++static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) ++{ ++ tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); ++ tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); ++ tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); ++ tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); ++ tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); ++ tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); ++ tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); ++ tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); ++ tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); ++ tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); ++ tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); ++ tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); ++ tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); ++ tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); ++ tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); ++ tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); ++ 
tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); ++ tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); ++ tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); ++ ++ if (tg3_flag(tp, SUPPORT_MSIX)) ++ tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); ++ ++ tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); ++ tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); ++ tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); ++ tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); ++ tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); ++ tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); ++ tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); ++ tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); ++ ++ if (!tg3_flag(tp, 5705_PLUS)) { ++ tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); ++ tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); ++ tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); ++ } ++ ++ tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); ++ tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); ++ tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); ++ tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); ++ tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); ++ ++ if (tg3_flag(tp, NVRAM)) ++ tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); ++} ++ ++static void tg3_dump_state(struct tg3 *tp) ++{ ++ int i; ++ u32 *regs; ++ ++ regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); ++ if (!regs) { ++ netdev_err(tp->dev, "Failed allocating register dump buffer\n"); ++ return; ++ } ++ ++ if (tg3_flag(tp, PCI_EXPRESS)) { ++ /* Read up to but not including private PCI registers */ ++ for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) ++ regs[i / sizeof(u32)] = tr32(i); ++ } else ++ tg3_dump_legacy_regs(tp, regs); ++ ++ for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { ++ if (!regs[i + 0] && !regs[i + 1] && ++ !regs[i + 2] && !regs[i + 3]) ++ continue; ++ ++ netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", ++ i * 4, ++ regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); ++ } ++ ++ kfree(regs); ++ ++ for (i = 0; i < tp->irq_cnt; i++) { ++ struct tg3_napi *tnapi = &tp->napi[i]; ++ ++ /* SW status block */ ++ netdev_err(tp->dev, ++ "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", ++ i, ++ tnapi->hw_status->status, ++ tnapi->hw_status->status_tag, ++ tnapi->hw_status->rx_jumbo_consumer, ++ tnapi->hw_status->rx_consumer, ++ tnapi->hw_status->rx_mini_consumer, ++ tnapi->hw_status->idx[0].rx_producer, ++ tnapi->hw_status->idx[0].tx_consumer); ++ ++ netdev_err(tp->dev, ++ "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", ++ i, ++ tnapi->last_tag, tnapi->last_irq_tag, ++ tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, ++ tnapi->rx_rcb_ptr, ++ tnapi->prodring.rx_std_prod_idx, ++ tnapi->prodring.rx_std_cons_idx, ++ tnapi->prodring.rx_jmb_prod_idx, ++ tnapi->prodring.rx_jmb_cons_idx); ++ } ++} ++ ++/* This is called whenever we suspect that the system chipset is re- ++ * ordering the sequence of MMIO to the tx send mailbox. The symptom ++ * is bogus tx completions. We try to recover by setting the ++ * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later ++ * in the workqueue. ++ */ ++static void tg3_tx_recover(struct tg3 *tp) ++{ ++ BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || ++ tp->write32_tx_mbox == tg3_write_indirect_mbox); ++ ++ netdev_warn(tp->dev, ++ "The system may be re-ordering memory-mapped I/O " ++ "cycles to the network device, attempting to recover. 
" ++ "Please report the problem to the driver maintainer " ++ "and include system chipset information.\n"); ++ ++ tg3_flag_set(tp, TX_RECOVERY_PENDING); ++} ++ ++static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) ++{ ++ /* Tell compiler to fetch tx indices from memory. */ ++ barrier(); ++ return tnapi->tx_pending - ++ ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); ++} ++ ++/* Tigon3 never reports partial packet sends. So we do not ++ * need special logic to handle SKBs that have not had all ++ * of their frags sent yet, like SunGEM does. ++ */ ++static void tg3_tx(struct tg3_napi *tnapi) ++{ ++ struct tg3 *tp = tnapi->tp; ++ u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; ++ u32 sw_idx = tnapi->tx_cons; ++ struct netdev_queue *txq; ++ int index = tnapi - tp->napi; ++ unsigned int pkts_compl = 0, bytes_compl = 0; ++ ++ if (tg3_flag(tp, ENABLE_TSS)) ++ index--; ++ ++ txq = netdev_get_tx_queue(tp->dev, index); ++ ++ while (sw_idx != hw_idx) { ++ struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; ++ struct sk_buff *skb = ri->skb; ++ int i, tx_bug = 0; ++ ++ if (unlikely(skb == NULL)) { ++ tg3_tx_recover(tp); ++ return; ++ } ++ ++#ifdef BCM_HAS_IEEE1588_SUPPORT ++ if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) { ++ struct skb_shared_hwtstamps timestamp; ++ u64 hwclock = tr32(TG3_TX_TSTAMP_LSB); ++ hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32; ++ ++ tg3_hwclock_to_timestamp(tp, hwclock, ×tamp); ++ ++ skb_tstamp_tx(skb, ×tamp); ++ } ++#endif /* BCM_HAS_IEEE1588_SUPPORT */ ++ ++ pci_unmap_single(tp->pdev, ++ dma_unmap_addr(ri, mapping), ++ skb_headlen(skb), ++ PCI_DMA_TODEVICE); ++ ++ ri->skb = NULL; ++ ++ while (ri->fragmented) { ++ ri->fragmented = false; ++ sw_idx = NEXT_TX(sw_idx); ++ ri = &tnapi->tx_buffers[sw_idx]; ++ } ++ ++ sw_idx = NEXT_TX(sw_idx); ++ ++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { ++ ri = &tnapi->tx_buffers[sw_idx]; ++ if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) ++ tx_bug = 1; ++ ++ pci_unmap_page(tp->pdev, ++ dma_unmap_addr(ri, mapping), ++ skb_frag_size(&skb_shinfo(skb)->frags[i]), ++ PCI_DMA_TODEVICE); ++ ++ while (ri->fragmented) { ++ ri->fragmented = false; ++ sw_idx = NEXT_TX(sw_idx); ++ ri = &tnapi->tx_buffers[sw_idx]; ++ } ++ ++ sw_idx = NEXT_TX(sw_idx); ++ } ++ ++ pkts_compl++; ++ bytes_compl += skb->len; ++ ++ dev_kfree_skb(skb); ++ ++ if (unlikely(tx_bug)) { ++ tg3_tx_recover(tp); ++ return; ++ } ++ } ++ ++ netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); ++ ++ tnapi->tx_cons = sw_idx; ++ ++ /* Need to make the tx_cons update visible to tg3_start_xmit() ++ * before checking for netif_queue_stopped(). Without the ++ * memory barrier, there is a small possibility that tg3_start_xmit() ++ * will miss it and cause the queue to be stopped forever. 
++ */ ++ smp_mb(); ++ ++ if (unlikely(netif_tx_queue_stopped(txq) && ++ (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { ++ __netif_tx_lock(txq, smp_processor_id()); ++ if (netif_tx_queue_stopped(txq) && ++ (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) ++ netif_tx_wake_queue(txq); ++ __netif_tx_unlock(txq); ++ } ++} ++ ++static void tg3_frag_free(bool is_frag, void *data) ++{ ++#ifdef BCM_HAS_BUILD_SKB ++ if (is_frag) ++ put_page(virt_to_head_page(data)); ++ else ++ kfree(data); ++#else ++ dev_kfree_skb_any(data); ++#endif ++} ++ ++static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) ++{ ++ unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) + ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); ++ ++ if (!ri->data) ++ return; ++ ++ pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), ++ map_sz, PCI_DMA_FROMDEVICE); ++ tg3_frag_free(skb_size <= PAGE_SIZE, ri->data); ++ ri->data = NULL; ++} ++ ++/* Returns size of skb allocated or < 0 on error. ++ * ++ * We only need to fill in the address because the other members ++ * of the RX descriptor are invariant, see tg3_init_rings. ++ * ++ * Note the purposeful asymmetry of cpu vs. chip accesses. For ++ * posting buffers we only dirty the first cache line of the RX ++ * descriptor (containing the address). Whereas for the RX status ++ * buffers the cpu only reads the last cacheline of the RX descriptor ++ * (to fetch the error flags, vlan tag, checksum, and opaque cookie). ++ */ ++static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, ++ u32 opaque_key, u32 dest_idx_unmasked, ++ unsigned int *frag_size) ++{ ++ struct tg3_rx_buffer_desc *desc; ++ struct ring_info *map; ++ u8 *data; ++ dma_addr_t mapping; ++#ifdef BCM_HAS_BUILD_SKB ++ int skb_size; ++#else ++ struct sk_buff *skb; ++#endif ++ int data_size, dest_idx; ++ ++ switch (opaque_key) { ++ case RXD_OPAQUE_RING_STD: ++ dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; ++ desc = &tpr->rx_std[dest_idx]; ++ map = &tpr->rx_std_buffers[dest_idx]; ++ data_size = tp->rx_pkt_map_sz; ++ break; ++ ++ case RXD_OPAQUE_RING_JUMBO: ++ dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; ++ desc = &tpr->rx_jmb[dest_idx].std; ++ map = &tpr->rx_jmb_buffers[dest_idx]; ++ data_size = TG3_RX_JMB_MAP_SZ; ++ break; ++ ++ default: ++ return -EINVAL; ++ } ++ ++ /* Do not overwrite any of the map or rp information ++ * until we are sure we can commit to a new buffer. ++ * ++ * Callers depend upon this behavior and assume that ++ * we leave everything unchanged if we fail. 
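++ * Every error path below returns before map->data or the ++ * descriptor address is written.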
++ */ ++#ifdef BCM_HAS_BUILD_SKB ++ skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); ++ if (skb_size <= PAGE_SIZE) { ++ data = netdev_alloc_frag(skb_size); ++ *frag_size = skb_size; ++ } else { ++ data = kmalloc(skb_size, GFP_ATOMIC); ++ *frag_size = 0; ++ } ++ if (!data) ++ return -ENOMEM; ++#else ++ skb = netdev_alloc_skb(tp->dev, data_size + TG3_RX_OFFSET(tp) + ++ TG3_COMPAT_VLAN_ALLOC_LEN); ++ if (skb == NULL) ++ return -ENOMEM; ++ ++ skb_reserve(skb, TG3_RX_OFFSET(tp) + ++ TG3_COMPAT_VLAN_RESERVE(TG3_TO_INT(skb->data))); ++ data = skb->data; ++ ++#endif ++ ++ mapping = pci_map_single(tp->pdev, ++ data + TG3_RX_OFFSET(tp), ++ data_size, ++ PCI_DMA_FROMDEVICE); ++ if (unlikely(pci_dma_mapping_error_(tp->pdev, mapping))) { ++#ifdef BCM_HAS_BUILD_SKB ++ tg3_frag_free(skb_size <= PAGE_SIZE, data); ++#else ++ dev_kfree_skb(skb); ++#endif ++ return -EIO; ++ } ++ ++#ifdef BCM_HAS_BUILD_SKB ++ map->data = data; ++#else ++ map->data = skb; ++#endif ++ dma_unmap_addr_set(map, mapping, mapping); ++ ++ desc->addr_hi = ((u64)mapping >> 32); ++ desc->addr_lo = ((u64)mapping & 0xffffffff); ++ ++ return data_size; ++} ++ ++/* We only need to move over in the address because the other ++ * members of the RX descriptor are invariant. See notes above ++ * tg3_alloc_rx_data for full details. ++ */ ++static void tg3_recycle_rx(struct tg3_napi *tnapi, ++ struct tg3_rx_prodring_set *dpr, ++ u32 opaque_key, int src_idx, ++ u32 dest_idx_unmasked) ++{ ++ struct tg3 *tp = tnapi->tp; ++ struct tg3_rx_buffer_desc *src_desc, *dest_desc; ++ struct ring_info *src_map, *dest_map; ++ struct tg3_rx_prodring_set *spr = tnapi->srcprodring; ++ int dest_idx; ++ ++ switch (opaque_key) { ++ case RXD_OPAQUE_RING_STD: ++ dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; ++ dest_desc = &dpr->rx_std[dest_idx]; ++ dest_map = &dpr->rx_std_buffers[dest_idx]; ++ src_desc = &spr->rx_std[src_idx]; ++ src_map = &spr->rx_std_buffers[src_idx]; ++ break; ++ ++ case RXD_OPAQUE_RING_JUMBO: ++ dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; ++ dest_desc = &dpr->rx_jmb[dest_idx].std; ++ dest_map = &dpr->rx_jmb_buffers[dest_idx]; ++ src_desc = &spr->rx_jmb[src_idx].std; ++ src_map = &spr->rx_jmb_buffers[src_idx]; ++ break; ++ ++ default: ++ return; ++ } ++ ++ dest_map->data = src_map->data; ++ dma_unmap_addr_set(dest_map, mapping, ++ dma_unmap_addr(src_map, mapping)); ++ dest_desc->addr_hi = src_desc->addr_hi; ++ dest_desc->addr_lo = src_desc->addr_lo; ++ ++ /* Ensure that the update to the skb happens after the physical ++ * addresses have been transferred to the new BD location. ++ */ ++ smp_wmb(); ++ ++ src_map->data = NULL; ++} ++ ++/* The RX ring scheme is composed of multiple rings which post fresh ++ * buffers to the chip, and one special ring the chip uses to report ++ * status back to the host. ++ * ++ * The special ring reports the status of received packets to the ++ * host. The chip does not write into the original descriptor the ++ * RX buffer was obtained from. The chip simply takes the original ++ * descriptor as provided by the host, updates the status and length ++ * field, then writes this into the next status ring entry. ++ * ++ * Each ring the host uses to post buffers to the chip is described ++ * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives, ++ * it is first placed into the on-chip ram. When the packet's length ++ * is known, it walks down the TG3_BDINFO entries to select the ring. 
++ * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO ++ * which is within the range of the new packet's length is chosen. ++ * ++ * The "separate ring for rx status" scheme may sound queer, but it makes ++ * sense from a cache coherency perspective. If only the host writes ++ * to the buffer post rings, and only the chip writes to the rx status ++ * rings, then cache lines never move beyond shared-modified state. ++ * If both the host and chip were to write into the same ring, cache line ++ * eviction could occur since both entities want it in an exclusive state. ++ */ ++static int tg3_rx(struct tg3_napi *tnapi, int budget) ++{ ++ struct tg3 *tp = tnapi->tp; ++ u32 work_mask, rx_std_posted = 0; ++ u32 std_prod_idx, jmb_prod_idx; ++ u32 sw_idx = tnapi->rx_rcb_ptr; ++ u16 hw_idx; ++ int received; ++ struct tg3_rx_prodring_set *tpr = &tnapi->prodring; ++ ++ hw_idx = *(tnapi->rx_rcb_prod_idx); ++ /* ++ * We need to order the read of hw_idx and the read of ++ * the opaque cookie. ++ */ ++ rmb(); ++ work_mask = 0; ++ received = 0; ++ std_prod_idx = tpr->rx_std_prod_idx; ++ jmb_prod_idx = tpr->rx_jmb_prod_idx; ++ while (sw_idx != hw_idx && budget > 0) { ++ struct ring_info *ri; ++ struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; ++ unsigned int len; ++ struct sk_buff *skb; ++ dma_addr_t dma_addr; ++ u32 opaque_key, desc_idx, *post_ptr; ++ u8 *data; ++#ifdef BCM_HAS_IEEE1588_SUPPORT ++ u64 tstamp = 0; ++#endif /* BCM_HAS_IEEE1588_SUPPORT */ ++ ++ desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; ++ opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; ++ if (opaque_key == RXD_OPAQUE_RING_STD) { ++ ri = &tnapi->srcprodring->rx_std_buffers[desc_idx]; ++ dma_addr = dma_unmap_addr(ri, mapping); ++#ifdef BCM_HAS_BUILD_SKB ++ data = ri->data; ++#else ++ skb = ri->data; ++ data = skb->data; ++#endif ++ post_ptr = &std_prod_idx; ++ rx_std_posted++; ++ } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { ++ ri = &tnapi->srcprodring->rx_jmb_buffers[desc_idx]; ++ dma_addr = dma_unmap_addr(ri, mapping); ++#ifdef BCM_HAS_BUILD_SKB ++ data = ri->data; ++#else ++ skb = ri->data; ++ data = skb->data; ++#endif ++ post_ptr = &jmb_prod_idx; ++ } else ++ goto next_pkt_nopost; ++ ++ work_mask |= opaque_key; ++ ++ if (desc->err_vlan & RXD_ERR_MASK) { ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ tnapi->netq.stats.rx_errors_sw++; ++ ++ if (desc->err_vlan & RXD_ERR_BAD_CRC) ++ tnapi->netq.stats.rx_crc_errors++; ++ ++ if (desc->err_vlan & ++ (RXD_ERR_TOO_SMALL | ++ RXD_ERR_HUGE_FRAME)) ++ tnapi->netq.stats.rx_frame_errors++; ++#endif ++ drop_it: ++ tg3_recycle_rx(tnapi, tpr, opaque_key, ++ desc_idx, *post_ptr); ++ drop_it_no_recycle: ++ /* Other statistics kept track of by card. */ ++ tp->rx_dropped++; ++ goto next_pkt; ++ } ++ ++ len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - ++ ETH_FCS_LEN; ++ ++#ifdef BCM_HAS_IEEE1588_SUPPORT ++ if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == ++ RXD_FLAG_PTPSTAT_PTPV1 || ++ (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == ++ RXD_FLAG_PTPSTAT_PTPV2) { ++ /* Read the timestamp out early, in case we drop the packet. 
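The RX timestamp registers appear to hold only the most recent PTP frame, so a later packet would overwrite them.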
*/ ++ tstamp = tr32(TG3_RX_TSTAMP_LSB); ++ tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32; ++ } ++#endif /* BCM_HAS_IEEE1588_SUPPORT */ ++ ++ if (len > TG3_RX_COPY_THRESH(tp)) { ++ int skb_size; ++ unsigned int frag_size; ++ ++ skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, ++ *post_ptr, &frag_size); ++ if (skb_size < 0) ++ goto drop_it; ++ ++ pci_unmap_single(tp->pdev, dma_addr, skb_size, ++ PCI_DMA_FROMDEVICE); ++ ++ /* Ensure that the update to the data happens ++ * after the usage of the old DMA mapping. ++ */ ++ smp_wmb(); ++ ++ ri->data = NULL; ++ ++#ifdef BCM_HAS_BUILD_SKB ++ skb = build_skb(data, frag_size); ++ if (!skb) { ++ tg3_frag_free(frag_size != 0, data); ++ goto drop_it_no_recycle; ++ } ++ skb_reserve(skb, TG3_RX_OFFSET(tp)); ++#endif ++ } else { ++ tg3_recycle_rx(tnapi, tpr, opaque_key, ++ desc_idx, *post_ptr); ++ ++ skb = netdev_alloc_skb(tp->dev, ++ len + TG3_RAW_IP_ALIGN); ++ if (skb == NULL) ++ goto drop_it_no_recycle; ++ ++ skb_reserve(skb, TG3_RAW_IP_ALIGN); ++ pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); ++ memcpy(skb->data, ++ data + TG3_RX_OFFSET(tp), ++ len); ++ pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); ++ } ++ ++ skb_put(skb, len); ++#ifdef BCM_HAS_IEEE1588_SUPPORT ++ if (tstamp) ++ tg3_hwclock_to_timestamp(tp, tstamp, ++ skb_hwtstamps(skb)); ++#endif /* BCM_HAS_IEEE1588_SUPPORT */ ++ ++ if ((tp->dev->features & NETIF_F_RXCSUM) && ++ (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && ++ (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) ++ >> RXD_TCPCSUM_SHIFT) == 0xffff)) ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++ else ++ skb_checksum_none_assert(skb); ++ ++#ifndef BCM_HAS_NEW_VLAN_INTERFACE ++ if (desc->type_flags & RXD_FLAG_VLAN) { ++ if (tp->rx_mode & RX_MODE_KEEP_VLAN_TAG) { ++ desc->type_flags &= ~RXD_FLAG_VLAN; ++ } else if (!tp->vlgrp) { ++ struct vlan_ethhdr *ve = (struct vlan_ethhdr *) ++ __skb_push(skb, VLAN_HLEN); ++ ++ memmove(ve, skb->data + VLAN_HLEN, ++ ETH_ALEN * 2); ++ ve->h_vlan_proto = htons(ETH_P_8021Q); ++ ve->h_vlan_TCI = htons(desc->err_vlan & RXD_VLAN_MASK); ++ ++ desc->type_flags &= ~RXD_FLAG_VLAN; ++ } ++ } ++#endif /* BCM_HAS_NEW_VLAN_INTERFACE */ ++ ++ skb->protocol = eth_type_trans(skb, tp->dev); ++ ++ if (len > (tp->dev->mtu + ETH_HLEN) && ++ skb->protocol != htons(ETH_P_8021Q)) { ++ dev_kfree_skb(skb); ++ goto drop_it_no_recycle; ++ } ++ ++#ifndef BCM_HAS_NEW_VLAN_INTERFACE ++ if (desc->type_flags & RXD_FLAG_VLAN) { ++ vlan_gro_receive(&tnapi->napi, tp->vlgrp, ++ desc->err_vlan & RXD_VLAN_MASK, skb); ++ } else ++#else ++ if (desc->type_flags & RXD_FLAG_VLAN && ++ !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) ++ __vlan_hwaccel_put_tag(skb, ++#ifdef BCM_HWACCEL_HAS_PROTO_ARG ++ htons(ETH_P_8021Q), ++#endif ++ desc->err_vlan & RXD_VLAN_MASK); ++#endif /* BCM_HAS_NEW_VLAN_INTERFACE */ ++ ++ napi_gro_receive(&tnapi->napi, skb); ++ ++#if (LINUX_VERSION_CODE < 0x02061D) /* 2.6.29 */ ++ tp->dev->last_rx = jiffies; ++#endif ++ received++; ++ budget--; ++ ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ /* Update queue specific stats */ ++ tnapi->netq.stats.rx_packets_sw++; ++ tnapi->netq.stats.rx_bytes_sw += len; ++#endif ++ ++next_pkt: ++ (*post_ptr)++; ++ ++ if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { ++ tpr->rx_std_prod_idx = std_prod_idx & ++ tp->rx_std_ring_mask; ++ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, ++ tpr->rx_std_prod_idx); ++ work_mask &= ~RXD_OPAQUE_RING_STD; ++ rx_std_posted = 0; ++ } ++next_pkt_nopost: ++ sw_idx++; ++ sw_idx &= tp->rx_ret_ring_mask; ++ ++ /* Refresh hw_idx to see if 
there is new work */ ++ if (sw_idx == hw_idx) { ++ hw_idx = *(tnapi->rx_rcb_prod_idx); ++ rmb(); ++ } ++ } ++ ++ /* ACK the status ring. */ ++ tnapi->rx_rcb_ptr = sw_idx; ++ tw32_rx_mbox(tnapi->consmbox, sw_idx); ++ ++ /* Refill RX ring(s). */ ++ if (!tg3_flag(tp, ENABLE_RSS)) { ++ /* Sync BD data before updating mailbox */ ++ wmb(); ++ ++ if (work_mask & RXD_OPAQUE_RING_STD) { ++ tpr->rx_std_prod_idx = std_prod_idx & ++ tp->rx_std_ring_mask; ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ tw32_rx_mbox(tpr->rx_std_mbox, tpr->rx_std_prod_idx); ++#else ++ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, ++ tpr->rx_std_prod_idx); ++#endif ++ } ++ if (work_mask & RXD_OPAQUE_RING_JUMBO) { ++ tpr->rx_jmb_prod_idx = jmb_prod_idx & ++ tp->rx_jmb_ring_mask; ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ tw32_rx_mbox(tpr->rx_jmb_mbox, tpr->rx_jmb_prod_idx); ++#else ++ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, ++ tpr->rx_jmb_prod_idx); ++#endif ++ } ++ mmiowb(); ++ } else if (work_mask) { ++ /* rx_std_buffers[] and rx_jmb_buffers[] entries must be ++ * updated before the producer indices can be updated. ++ */ ++ smp_wmb(); ++ ++ tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; ++ tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; ++ ++ if (tnapi != &tp->napi[1]) { ++ tp->rx_refill = true; ++ napi_schedule_(tp->dev, &tp->napi[1].napi); ++ } ++ } ++ ++ return received; ++} ++ ++static void tg3_poll_link(struct tg3 *tp) ++{ ++ /* handle link change and other phy events */ ++ if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { ++ struct tg3_hw_status *sblk = tp->napi[0].hw_status; ++ ++ if (sblk->status & SD_STATUS_LINK_CHG) { ++ sblk->status = SD_STATUS_UPDATED | ++ (sblk->status & ~SD_STATUS_LINK_CHG); ++ spin_lock(&tp->lock); ++ if (tg3_flag(tp, USE_PHYLIB)) { ++ tw32_f(MAC_STATUS, ++ (MAC_STATUS_SYNC_CHANGED | ++ MAC_STATUS_CFG_CHANGED | ++ MAC_STATUS_MI_COMPLETION | ++ MAC_STATUS_LNKSTATE_CHANGED)); ++ udelay(40); ++ } else ++ tg3_setup_phy(tp, false); ++ spin_unlock(&tp->lock); ++ } ++ } ++} ++ ++static inline void tg3_reset_task_schedule(struct tg3 *tp) ++{ ++ if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) ++ schedule_work(&tp->reset_task); ++} ++ ++static inline void tg3_reset_task_cancel(struct tg3 *tp) ++{ ++#if (LINUX_VERSION_CODE >= 0x20616) || defined (__VMKLNX__) ++ cancel_work_sync(&tp->reset_task); ++#else ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_timeout(1); ++#endif ++ tg3_flag_clear(tp, RESET_TASK_PENDING); ++ tg3_flag_clear(tp, TX_RECOVERY_PENDING); ++} ++ ++static void tg3_process_error(struct tg3 *tp) ++{ ++ u32 val; ++ bool real_error = false; ++ ++ if (tg3_flag(tp, ERROR_PROCESSED)) ++ return; ++ ++ /* Check Flow Attention register */ ++ val = tr32(HOSTCC_FLOW_ATTN); ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ /* Shutting down NetQueues causes permissible RCB errors */ ++ val &= ~(HOSTCC_FLOW_ATTN_MBUF_LWM | ++ HOSTCC_FLOW_ATTN_RCB_MISCFG | ++ HOSTCC_FLOW_ATTN_RCV_BDI_ATTN); ++#endif ++ if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { ++ netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n"); ++ real_error = true; ++ } ++ ++ if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { ++ netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); ++ real_error = true; ++ } ++ ++ if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { ++ netdev_err(tp->dev, "DMA Status error. 
Resetting chip.\n"); ++ real_error = true; ++ } ++ ++ if (!real_error) ++ return; ++ ++#if !defined(__VMKLNX__) ++ /* Encountered a real error */ ++ tp->recoverable_err++; ++ ++ /* Check if we received two recoverable errors within 10 seconds; if so, ++ * set the unrecoverable flag and move this port to the closed state. ++ */ ++ if (time_before(jiffies, ++ tp->recoverable_err_jiffies + ++ tp->recoverable_err_interval)) ++ tp->unrecoverable_err++; ++ ++ tp->recoverable_err_jiffies = jiffies; ++#endif ++ ++ tg3_dump_state(tp); ++ ++ tg3_flag_set(tp, ERROR_PROCESSED); ++ tg3_reset_task_schedule(tp); ++} ++ ++static inline void tg3_send_ape_heartbeat(struct tg3 *tp, ++ unsigned long interval) ++{ ++ /* Check if the heartbeat interval has been exceeded */ ++ if (!tg3_flag(tp, ENABLE_APE) || ++ time_before(jiffies, tp->ape_hb_jiffies + interval)) ++ return; ++ ++ tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++); ++ tp->ape_hb_jiffies = jiffies; ++ return; ++} ++ ++#ifdef TG3_NAPI ++ ++static int tg3_rx_prodring_xfer(struct tg3 *tp, ++ struct tg3_rx_prodring_set *dpr, ++ struct tg3_rx_prodring_set *spr) ++{ ++ u32 si, di, cpycnt, src_prod_idx; ++ int i, err = 0; ++ ++ while (1) { ++ src_prod_idx = spr->rx_std_prod_idx; ++ ++ /* Make sure updates to the rx_std_buffers[] entries and the ++ * standard producer index are seen in the correct order. ++ */ ++ smp_rmb(); ++ ++ if (spr->rx_std_cons_idx == src_prod_idx) ++ break; ++ ++ if (spr->rx_std_cons_idx < src_prod_idx) ++ cpycnt = src_prod_idx - spr->rx_std_cons_idx; ++ else ++ cpycnt = tp->rx_std_ring_mask + 1 - ++ spr->rx_std_cons_idx; ++ ++ cpycnt = min(cpycnt, ++ tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); ++ ++ si = spr->rx_std_cons_idx; ++ di = dpr->rx_std_prod_idx; ++ ++ for (i = di; i < di + cpycnt; i++) { ++ if (dpr->rx_std_buffers[i].data) { ++ cpycnt = i - di; ++ err = -ENOSPC; ++ break; ++ } ++ } ++ ++ if (!cpycnt) ++ break; ++ ++ /* Ensure that updates to the rx_std_buffers ring and the ++ * shadowed hardware producer ring from tg3_recycle_skb() are ++ * ordered correctly WRT the skb check above. ++ */ ++ smp_rmb(); ++ ++ memcpy(&dpr->rx_std_buffers[di], ++ &spr->rx_std_buffers[si], ++ cpycnt * sizeof(struct ring_info)); ++ ++ for (i = 0; i < cpycnt; i++, di++, si++) { ++ struct tg3_rx_buffer_desc *sbd, *dbd; ++ sbd = &spr->rx_std[si]; ++ dbd = &dpr->rx_std[di]; ++ dbd->addr_hi = sbd->addr_hi; ++ dbd->addr_lo = sbd->addr_lo; ++ } ++ ++ spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & ++ tp->rx_std_ring_mask; ++ dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & ++ tp->rx_std_ring_mask; ++ } ++ ++ while (1) { ++ src_prod_idx = spr->rx_jmb_prod_idx; ++ ++ /* Make sure updates to the rx_jmb_buffers[] entries and ++ * the jumbo producer index are seen in the correct order. ++ */ ++ smp_rmb(); ++ ++ if (spr->rx_jmb_cons_idx == src_prod_idx) ++ break; ++ ++ if (spr->rx_jmb_cons_idx < src_prod_idx) ++ cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; ++ else ++ cpycnt = tp->rx_jmb_ring_mask + 1 - ++ spr->rx_jmb_cons_idx; ++ ++ cpycnt = min(cpycnt, ++ tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); ++ ++ si = spr->rx_jmb_cons_idx; ++ di = dpr->rx_jmb_prod_idx; ++ ++ for (i = di; i < di + cpycnt; i++) { ++ if (dpr->rx_jmb_buffers[i].data) { ++ cpycnt = i - di; ++ err = -ENOSPC; ++ break; ++ } ++ } ++ ++ if (!cpycnt) ++ break; ++ ++ /* Ensure that updates to the rx_jmb_buffers ring and the ++ * shadowed hardware producer ring from tg3_recycle_skb() are ++ * ordered correctly WRT the skb check above. 
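++ * Without this read barrier the memcpy() below could observe ++ * stale buffer pointers.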
++ */ ++ smp_rmb(); ++ ++ memcpy(&dpr->rx_jmb_buffers[di], ++ &spr->rx_jmb_buffers[si], ++ cpycnt * sizeof(struct ring_info)); ++ ++ for (i = 0; i < cpycnt; i++, di++, si++) { ++ struct tg3_rx_buffer_desc *sbd, *dbd; ++ sbd = &spr->rx_jmb[si].std; ++ dbd = &dpr->rx_jmb[di].std; ++ dbd->addr_hi = sbd->addr_hi; ++ dbd->addr_lo = sbd->addr_lo; ++ } ++ ++ spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & ++ tp->rx_jmb_ring_mask; ++ dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & ++ tp->rx_jmb_ring_mask; ++ } ++ ++ return err; ++} ++ ++static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) ++{ ++ struct tg3 *tp = tnapi->tp; ++ ++ /* run TX completion thread */ ++ if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { ++ tg3_tx(tnapi); ++ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) ++ return work_done; ++ } ++ ++ if (!tnapi->rx_rcb_prod_idx) ++ return work_done; ++ ++ /* run RX thread, within the bounds set by NAPI. ++ * All RX "locking" is done by ensuring outside ++ * code synchronizes with tg3->napi.poll() ++ */ ++ if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) ++ work_done += tg3_rx(tnapi, budget - work_done); ++ ++ if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { ++ struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; ++ int i, err = 0; ++ u32 std_prod_idx = dpr->rx_std_prod_idx; ++ u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; ++ ++ tp->rx_refill = false; ++ for (i = 1; i <= tp->rxq_cnt; i++) ++ err |= tg3_rx_prodring_xfer(tp, dpr, ++ &tp->napi[i].prodring); ++ ++ wmb(); ++ ++ if (std_prod_idx != dpr->rx_std_prod_idx) ++ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, ++ dpr->rx_std_prod_idx); ++ ++ if (jmb_prod_idx != dpr->rx_jmb_prod_idx) ++ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, ++ dpr->rx_jmb_prod_idx); ++ ++ mmiowb(); ++ ++ if (err) ++ tw32_f(HOSTCC_MODE, tp->coal_now); ++ } ++ ++ return work_done; ++} ++ ++static int tg3_poll_msix(struct napi_struct *napi, int budget) ++{ ++ struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); ++ struct tg3 *tp = tnapi->tp; ++ int work_done = 0; ++ struct tg3_hw_status *sblk = tnapi->hw_status; ++ ++ while (1) { ++ work_done = tg3_poll_work(tnapi, work_done, budget); ++ ++ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) ++ goto tx_recovery; ++ ++ if (unlikely(work_done >= budget)) ++ break; ++ ++ /* tp->last_tag is used in tg3_int_reenable() below ++ * to tell the hw how much work has been processed, ++ * so we must read it before checking for more work. ++ */ ++ tnapi->last_tag = sblk->status_tag; ++ tnapi->last_irq_tag = tnapi->last_tag; ++ rmb(); ++ ++ /* check for RX/TX work to do */ ++ if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && ++ *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { ++ ++ /* This test here is not race free, but will reduce ++ * the number of interrupts by looping again. ++ */ ++ if (tnapi == &tp->napi[1] && tp->rx_refill) ++ continue; ++ ++ napi_complete_(tp->dev, napi); ++ /* Reenable interrupts. */ ++ tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); ++ ++ /* This test here is synchronized by napi_schedule() ++ * and napi_complete() to close the race condition. ++ */ ++ if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) { ++ tw32(HOSTCC_MODE, tp->coalesce_mode | ++ HOSTCC_MODE_ENABLE | ++ tnapi->coal_now); ++ } ++ mmiowb(); ++ break; ++ } ++ } ++ ++ tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); ++ return work_done; ++ ++tx_recovery: ++ /* work_done is guaranteed to be less than budget. 
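Completing NAPI here therefore stays within the poll() contract even though we bail out early.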
*/ ++ napi_complete_(tp->dev, napi); ++ tg3_reset_task_schedule(tp); ++ return work_done; ++} ++ ++static int tg3_poll(struct napi_struct *napi, int budget) ++{ ++ struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); ++ struct tg3 *tp = tnapi->tp; ++ int work_done = 0; ++ struct tg3_hw_status *sblk = tnapi->hw_status; ++ ++ while (1) { ++ if (sblk->status & SD_STATUS_ERROR) ++ tg3_process_error(tp); ++ ++ tg3_poll_link(tp); ++ ++ work_done = tg3_poll_work(tnapi, work_done, budget); ++ ++ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) ++ goto tx_recovery; ++ ++ if (unlikely(work_done >= budget)) ++ break; ++ ++ if (tg3_flag(tp, TAGGED_STATUS)) { ++ /* tp->last_tag is used in tg3_int_reenable() below ++ * to tell the hw how much work has been processed, ++ * so we must read it before checking for more work. ++ */ ++ tnapi->last_tag = sblk->status_tag; ++ tnapi->last_irq_tag = tnapi->last_tag; ++ rmb(); ++ } else ++ sblk->status &= ~SD_STATUS_UPDATED; ++ ++ if (likely(!tg3_has_work(tnapi))) { ++ napi_complete_(tp->dev, napi); ++ tg3_int_reenable(tnapi); ++ break; ++ } ++ } ++ ++ tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); ++ return work_done; ++ ++tx_recovery: ++ /* work_done is guaranteed to be less than budget. */ ++ napi_complete_(tp->dev, napi); ++ tg3_reset_task_schedule(tp); ++ return work_done; ++} ++ ++#else ++ ++static int tg3_poll(struct net_device *netdev, int *budget) ++{ ++ struct tg3 *tp = netdev_priv(netdev); ++ struct tg3_napi *tnapi = &tp->napi[0]; ++ struct tg3_hw_status *sblk = tnapi->hw_status; ++ int done; ++ ++ if (sblk->status & SD_STATUS_ERROR) ++ tg3_process_error(tp); ++ ++ tg3_poll_link(tp); ++ ++ /* run TX completion thread */ ++ if (sblk->idx[0].tx_consumer != tnapi->tx_cons) { ++ tg3_tx(tnapi); ++ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) { ++ netif_rx_complete(netdev); ++ tg3_reset_task_schedule(tp); ++ return 0; ++ } ++ } ++ ++ /* run RX thread, within the bounds set by NAPI. ++ * All RX "locking" is done by ensuring outside ++ * code synchronizes with dev->poll() ++ */ ++ if (sblk->idx[0].rx_producer != tnapi->rx_rcb_ptr) { ++ int orig_budget = *budget; ++ int work_done; ++ ++ if (orig_budget > netdev->quota) ++ orig_budget = netdev->quota; ++ ++ work_done = tg3_rx(tnapi, orig_budget); ++ ++ *budget -= work_done; ++ netdev->quota -= work_done; ++ } ++ ++ if (tg3_flag(tp, TAGGED_STATUS)) { ++ tnapi->last_tag = sblk->status_tag; ++ rmb(); ++ } else ++ sblk->status &= ~SD_STATUS_UPDATED; ++ ++ /* if no more work, tell net stack and NIC we're done */ ++ done = !tg3_has_work(tnapi); ++ if (done) { ++ netif_rx_complete(netdev); ++ tg3_int_reenable(tnapi); ++ } ++ ++ return (done ? 
0 : 1); ++} ++ ++#endif /* TG3_NAPI */ ++ ++static void tg3_napi_disable(struct tg3 *tp) ++{ ++#ifdef TG3_NAPI ++ int i; ++ ++ for (i = tp->irq_cnt - 1; i >= 0; i--) ++ napi_disable(&tp->napi[i].napi); ++#else ++ netif_poll_disable(tp->dev); ++#endif ++} ++ ++static void tg3_napi_enable(struct tg3 *tp) ++{ ++#ifdef TG3_NAPI ++ int i; ++ ++ for (i = 0; i < tp->irq_cnt; i++) ++ napi_enable(&tp->napi[i].napi); ++#else ++ netif_poll_enable(tp->dev); ++#endif ++} ++ ++static void tg3_napi_init(struct tg3 *tp) ++{ ++#ifdef TG3_NAPI ++ int i; ++ ++ netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); ++ for (i = 1; i < tp->irq_cnt; i++) ++ netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); ++#else ++ tp->dev->poll = tg3_poll; ++ tp->dev->weight = 64; ++#endif ++} ++ ++static void tg3_napi_fini(struct tg3 *tp) ++{ ++#ifdef TG3_NAPI ++ int i; ++ ++ for (i = 0; i < tp->irq_cnt; i++) ++ netif_napi_del(&tp->napi[i].napi); ++#endif ++} ++ ++static inline void tg3_netif_stop(struct tg3 *tp) ++{ ++ tp->dev->trans_start = jiffies; /* prevent tx timeout */ ++ tg3_napi_disable(tp); ++ netif_carrier_off(tp->dev); /* prevent spurious tx timeout */ ++ netif_tx_disable(tp->dev); ++} ++ ++/* tp->lock must be held */ ++static inline void tg3_netif_start(struct tg3 *tp) ++{ ++ tg3_ptp_resume(tp); ++ ++ /* NOTE: unconditional netif_tx_wake_all_queues is only ++ * appropriate so long as all callers are assured to ++ * have free tx slots (such as after tg3_init_hw) ++ */ ++ netif_tx_wake_all_queues(tp->dev); ++ ++ if (tp->link_up) ++ netif_carrier_on(tp->dev); ++ ++ tg3_napi_enable(tp); ++ tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; ++ tg3_enable_ints(tp); ++} ++ ++static void tg3_irq_quiesce(struct tg3 *tp) ++{ ++#if (LINUX_VERSION_CODE >= 0x2051c) ++ int i; ++#endif ++ ++ BUG_ON(tp->irq_sync); ++ ++ tp->irq_sync = 1; ++ smp_mb(); ++ ++#if (LINUX_VERSION_CODE >= 0x2051c) ++ for (i = 0; i < tp->irq_cnt; i++) ++ synchronize_irq(tp->napi[i].irq_vec); ++#else ++ synchronize_irq(); ++#endif ++} ++ ++/* Fully shutdown all tg3 driver activity elsewhere in the system. ++ * If irq_sync is non-zero, then the IRQ handler must be synchronized ++ * with as well. Most of the time, this is not necessary except when ++ * shutting down the device. ++ */ ++static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) ++{ ++ spin_lock_bh(&tp->lock); ++ if (irq_sync) ++ tg3_irq_quiesce(tp); ++} ++ ++static inline void tg3_full_unlock(struct tg3 *tp) ++{ ++ spin_unlock_bh(&tp->lock); ++} ++ ++/* One-shot MSI handler - Chip automatically disables interrupt ++ * after sending MSI so driver doesn't have to do it. ++ */ ++#ifdef BCM_HAS_NEW_IRQ_SIG ++static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) ++#else ++static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs) ++#endif ++{ ++ struct tg3_napi *tnapi = dev_id; ++ struct tg3 *tp = tnapi->tp; ++ ++ prefetch(tnapi->hw_status); ++ if (tnapi->rx_rcb) ++ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); ++ ++ if (likely(!tg3_irq_sync(tp))) ++ napi_schedule_(tp->dev, &tnapi->napi); ++ ++ return IRQ_HANDLED; ++} ++ ++/* MSI ISR - No need to check for interrupt sharing and no need to ++ * flush status block and interrupt mailbox. PCI ordering rules ++ * guarantee that MSI will arrive after the status block. 
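++ * Posted writes on the same channel are delivered in order, so the ++ * device cannot raise the MSI ahead of the status block DMA.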
++ */ ++#ifdef BCM_HAS_NEW_IRQ_SIG ++static irqreturn_t tg3_msi(int irq, void *dev_id) ++#else ++static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs) ++#endif ++{ ++ struct tg3_napi *tnapi = dev_id; ++ struct tg3 *tp = tnapi->tp; ++ ++ prefetch(tnapi->hw_status); ++ if (tnapi->rx_rcb) ++ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); ++ /* ++ * Writing any value to intr-mbox-0 clears PCI INTA# and ++ * chip-internal interrupt pending events. ++ * Writing non-zero to intr-mbox-0 additionally tells the ++ * NIC to stop sending us irqs, engaging "in-intr-handler" ++ * event coalescing. ++ */ ++ tw32_mailbox(tnapi->int_mbox, 0x00000001); ++ if (likely(!tg3_irq_sync(tp))) ++ napi_schedule_(tp->dev, &tnapi->napi); ++ ++ return IRQ_RETVAL(1); ++} ++ ++#ifdef BCM_HAS_NEW_IRQ_SIG ++static irqreturn_t tg3_interrupt(int irq, void *dev_id) ++#else ++static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs) ++#endif ++{ ++ struct tg3_napi *tnapi = dev_id; ++ struct tg3 *tp = tnapi->tp; ++ struct tg3_hw_status *sblk = tnapi->hw_status; ++ unsigned int handled = 1; ++ ++ /* In INTx mode, it is possible for the interrupt to arrive at ++ * the CPU before the status block posted prior to the interrupt. ++ * Reading the PCI State register will confirm whether the ++ * interrupt is ours and will flush the status block. ++ */ ++ if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { ++ if (tg3_flag(tp, CHIP_RESETTING) || ++ (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { ++ handled = 0; ++ goto out; ++ } ++ } ++ ++ /* ++ * Writing any value to intr-mbox-0 clears PCI INTA# and ++ * chip-internal interrupt pending events. ++ * Writing non-zero to intr-mbox-0 additionally tells the ++ * NIC to stop sending us irqs, engaging "in-intr-handler" ++ * event coalescing. ++ * ++ * Flush the mailbox to de-assert the IRQ immediately to prevent ++ * spurious interrupts. The flush impacts performance but ++ * excessive spurious interrupts can be worse in some cases. ++ */ ++ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); ++ if (tg3_irq_sync(tp)) ++ goto out; ++ sblk->status &= ~SD_STATUS_UPDATED; ++ if (likely(tg3_has_work(tnapi))) { ++ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); ++ napi_schedule_(tp->dev, &tnapi->napi); ++ } else { ++ /* No work, shared interrupt perhaps? Re-enable ++ * interrupts, and flush that PCI write ++ */ ++ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, ++ 0x00000000); ++ } ++out: ++ return IRQ_RETVAL(handled); ++} ++ ++#ifdef BCM_HAS_NEW_IRQ_SIG ++static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) ++#else ++static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs) ++#endif ++{ ++ struct tg3_napi *tnapi = dev_id; ++ struct tg3 *tp = tnapi->tp; ++ struct tg3_hw_status *sblk = tnapi->hw_status; ++ unsigned int handled = 1; ++ ++ /* In INTx mode, it is possible for the interrupt to arrive at ++ * the CPU before the status block posted prior to the interrupt. ++ * Reading the PCI State register will confirm whether the ++ * interrupt is ours and will flush the status block. ++ */ ++ if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { ++ if (tg3_flag(tp, CHIP_RESETTING) || ++ (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { ++ handled = 0; ++ goto out; ++ } ++ } ++ ++ /* ++ * writing any value to intr-mbox-0 clears PCI INTA# and ++ * chip-internal interrupt pending events. 
++ * writing non-zero to intr-mbox-0 additionally tells the ++ * NIC to stop sending us irqs, engaging "in-intr-handler" ++ * event coalescing. ++ * ++ * Flush the mailbox to de-assert the IRQ immediately to prevent ++ * spurious interrupts. The flush impacts performance but ++ * excessive spurious interrupts can be worse in some cases. ++ */ ++ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); ++ ++ /* ++ * In a shared interrupt configuration, sometimes other devices' ++ * interrupts will scream. We record the current status tag here ++ * so that the above check can report that the screaming interrupts ++ * are unhandled. Eventually they will be silenced. ++ */ ++ tnapi->last_irq_tag = sblk->status_tag; ++ ++ if (tg3_irq_sync(tp)) ++ goto out; ++ ++ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); ++ ++ napi_schedule_(tp->dev, &tnapi->napi); ++ ++out: ++ return IRQ_RETVAL(handled); ++} ++ ++/* ISR for interrupt test */ ++#ifdef BCM_HAS_NEW_IRQ_SIG ++static irqreturn_t tg3_test_isr(int irq, void *dev_id) ++#else ++static irqreturn_t tg3_test_isr(int irq, void *dev_id, struct pt_regs *regs) ++#endif ++{ ++ struct tg3_napi *tnapi = dev_id; ++ struct tg3 *tp = tnapi->tp; ++ struct tg3_hw_status *sblk = tnapi->hw_status; ++ ++ if ((sblk->status & SD_STATUS_UPDATED) || ++ !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { ++ tg3_disable_ints(tp); ++ return IRQ_RETVAL(1); ++ } ++ return IRQ_RETVAL(0); ++} ++ ++#ifdef CONFIG_NET_POLL_CONTROLLER ++static void tg3_poll_controller(struct net_device *dev) ++{ ++#ifdef BCM_HAS_NEW_IRQ_SIG ++ int i; ++#endif ++ struct tg3 *tp = netdev_priv(dev); ++ ++ if (tg3_irq_sync(tp)) ++ return; ++ ++#if defined(BCM_HAS_NETDUMP_MODE) && (LINUX_VERSION_CODE < 0x20600) ++ if (netdump_mode) { ++ tg3_interrupt(tp->pdev->irq, dev, NULL); ++ if (dev->poll_list.prev) { ++ int budget = 64; ++ ++ tg3_poll(dev, &budget); ++ } ++ } ++ else ++#endif ++#ifdef BCM_HAS_NEW_IRQ_SIG ++ for (i = 0; i < tp->irq_cnt; i++) ++ tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); ++#else ++ tg3_interrupt(tp->pdev->irq, dev, NULL); ++#endif ++} ++#endif ++ ++static void tg3_tx_timeout(struct net_device *dev) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ if (netif_msg_tx_err(tp)) { ++ netdev_err(dev, "transmit timed out, resetting\n"); ++ tg3_dump_state(tp); ++#if defined(__VMKLNX__) ++ if (psod_on_tx_timeout) { ++ msleep(100); ++ BUG_ON(1); ++ return; ++ } ++#endif ++ } ++ ++ tg3_reset_task_schedule(tp); ++} ++ ++/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ ++static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) ++{ ++ u32 base = (u32) mapping & 0xffffffff; ++ ++ return (base + len + 8 < base); ++} ++ ++/* Test for TSO DMA buffers that cross into regions which are within MSS bytes ++ * of any 4GB boundaries: 4G, 8G, etc ++ */ ++static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping, ++ u32 len, u32 mss) ++{ ++ if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) { ++ u32 base = (u32) mapping & 0xffffffff; ++ ++ return ((base + len + (mss & 0x3fff)) < base); ++ } ++ return 0; ++} ++ ++/* Test for DMA addresses > 40-bit */ ++static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, ++ int len) ++{ ++#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) ++ if (tg3_flag(tp, 40BIT_DMA_BUG)) ++ return ((u64) mapping + len) > DMA_BIT_MASK(40); ++ return 0; ++#else ++ return 0; ++#endif ++} ++ ++static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, ++ dma_addr_t mapping, u32 len, u32 flags, ++ u32 
mss, u32 vlan) ++{ ++ txbd->addr_hi = ((u64) mapping >> 32); ++ txbd->addr_lo = ((u64) mapping & 0xffffffff); ++ txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); ++ txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); ++} ++ ++static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, ++ dma_addr_t map, u32 len, u32 flags, ++ u32 mss, u32 vlan) ++{ ++ struct tg3 *tp = tnapi->tp; ++ bool hwbug = false; ++ u32 dma_limit = tp->dma_limit; ++ ++ if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) ++ hwbug = true; ++ ++ if (tg3_4g_overflow_test(map, len)) { ++ tp->dma_4g_cross++; ++ hwbug = true; ++ } ++ ++ if (tg3_4g_tso_overflow_test(tp, map, len, mss)) ++ hwbug = true; ++ ++ if (tg3_40bit_overflow_test(tp, map, len)) ++ hwbug = true; ++ ++ if (dma_limit) { ++ u32 prvidx = *entry; ++ u32 tmp_flag = flags & ~TXD_FLAG_END; ++ while (len > dma_limit && *budget) { ++ u32 frag_len = dma_limit; ++ len -= dma_limit; ++ ++ /* Avoid the 8byte DMA problem */ ++ if (len <= 8) { ++ len += dma_limit / 2; ++ frag_len = dma_limit / 2; ++ } ++ ++ tnapi->tx_buffers[*entry].fragmented = true; ++ ++ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, ++ frag_len, tmp_flag, mss, vlan); ++ *budget -= 1; ++ prvidx = *entry; ++ *entry = NEXT_TX(*entry); ++ ++ map += frag_len; ++ } ++ ++ if (len) { ++ if (*budget) { ++ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, ++ len, flags, mss, vlan); ++ *budget -= 1; ++ *entry = NEXT_TX(*entry); ++ } else { ++ hwbug = true; ++ tnapi->tx_buffers[prvidx].fragmented = false; ++ } ++ } ++ } else { ++ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, ++ len, flags, mss, vlan); ++ *entry = NEXT_TX(*entry); ++ } ++ ++ return hwbug; ++} ++ ++static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) ++{ ++ int i; ++ struct sk_buff *skb; ++ struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; ++ ++ skb = txb->skb; ++ txb->skb = NULL; ++ ++ pci_unmap_single(tnapi->tp->pdev, ++ dma_unmap_addr(txb, mapping), ++ skb_headlen(skb), ++ PCI_DMA_TODEVICE); ++ ++ while (txb->fragmented) { ++ txb->fragmented = false; ++ entry = NEXT_TX(entry); ++ txb = &tnapi->tx_buffers[entry]; ++ } ++ ++ for (i = 0; i <= last; i++) { ++ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ++ ++ entry = NEXT_TX(entry); ++ txb = &tnapi->tx_buffers[entry]; ++ ++ pci_unmap_page(tnapi->tp->pdev, ++ dma_unmap_addr(txb, mapping), ++ skb_frag_size(frag), PCI_DMA_TODEVICE); ++ ++ while (txb->fragmented) { ++ txb->fragmented = false; ++ entry = NEXT_TX(entry); ++ txb = &tnapi->tx_buffers[entry]; ++ } ++ } ++} ++ ++/* Workaround 4GB and 40-bit hardware DMA bugs. */ ++static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, ++ struct sk_buff **pskb, ++ u32 *entry, u32 *budget, ++ u32 base_flags, u32 mss, u32 vlan) ++{ ++ struct tg3 *tp = tnapi->tp; ++ struct sk_buff *new_skb, *skb = *pskb; ++ dma_addr_t new_addr = 0; ++ int ret = 0; ++ ++ if (tg3_asic_rev(tp) != ASIC_REV_5701) ++ new_skb = skb_copy(skb, GFP_ATOMIC); ++ else { ++ int more_headroom = 4 - ((unsigned long)skb->data & 3); ++ ++ new_skb = skb_copy_expand(skb, ++ skb_headroom(skb) + more_headroom, ++ skb_tailroom(skb), GFP_ATOMIC); ++ } ++ ++ if (!new_skb) { ++ ret = -1; ++ } else { ++ /* New SKB is guaranteed to be linear. 
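skb_copy() and skb_copy_expand() coalesce all fragments into the head, so a single mapping of new_skb->len bytes covers the whole packet.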
*/ ++ new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, ++ PCI_DMA_TODEVICE); ++ /* Make sure the mapping succeeded */ ++ if (pci_dma_mapping_error_(tp->pdev, new_addr)) { ++ dev_kfree_skb(new_skb); ++ ret = -1; ++ } else { ++ u32 save_entry = *entry; ++ ++ base_flags |= TXD_FLAG_END; ++ ++ tnapi->tx_buffers[*entry].skb = new_skb; ++ dma_unmap_addr_set(&tnapi->tx_buffers[*entry], ++ mapping, new_addr); ++ ++ if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, ++ new_skb->len, base_flags, ++ mss, vlan)) { ++ tg3_tx_skb_unmap(tnapi, save_entry, -1); ++ dev_kfree_skb(new_skb); ++ ret = -1; ++ } ++ } ++ } ++ ++ dev_kfree_skb(skb); ++ *pskb = new_skb; ++ return ret; ++} ++ ++#if TG3_TSO_SUPPORT != 0 ++static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); ++ ++/* Use GSO to work around a rare TSO bug that may be triggered when the ++ * TSO header is greater than 80 bytes. ++ */ ++static netdev_tx_t tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, ++ struct netdev_queue *txq, struct sk_buff *skb) ++{ ++ struct sk_buff *segs, *nskb; ++ u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; ++ ++ /* Estimate the number of fragments in the worst case */ ++ if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) { ++ netif_tx_stop_queue(txq); ++ ++ /* netif_tx_stop_queue() must be done before checking ++ * the tx index in tg3_tx_avail() below, because in ++ * tg3_tx(), we update tx index before checking for ++ * netif_tx_queue_stopped(). ++ */ ++ smp_mb(); ++ if (tg3_tx_avail(tnapi) <= frag_cnt_est) ++ return NETDEV_TX_BUSY; ++ ++ netif_tx_wake_queue(txq); ++ } ++ ++ segs = skb_gso_segment(skb, tp->dev->features & ++ ~(NETIF_F_TSO | NETIF_F_TSO6)); ++ /* VMware always returns NULL. Linux will only return NULL ++ * when no segments are required. ++ */ ++ if (!segs || IS_ERR(segs)) ++ goto tg3_tso_bug_end; ++ ++ do { ++ nskb = segs; ++ segs = segs->next; ++ nskb->next = NULL; ++ tg3_start_xmit(nskb, tp->dev); ++ } while (segs); ++ ++tg3_tso_bug_end: ++ dev_kfree_skb(skb); ++ ++ return NETDEV_TX_OK; ++} ++#endif /* TG3_TSO_SUPPORT */ ++ ++/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and ++ * support TG3_FLAG_HW_TSO_1 or firmware TSO only. ++ */ ++static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ u32 len, entry, base_flags, mss, vlan = 0; ++ u32 budget; ++ int i = -1, would_hit_hwbug; ++ dma_addr_t mapping; ++ struct tg3_napi *tnapi; ++ struct netdev_queue *txq; ++ unsigned int last; ++ struct iphdr *iph = NULL; ++ struct tcphdr *tcph = NULL; ++ __sum16 tcp_csum = 0, ip_csum = 0; ++ __be16 ip_tot_len = 0; ++ ++ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); ++#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 50000) ++ /* For esx4.0/esx4.1u0-u2, the vmkernel doesn't check queue state ++ * before calling start_xmit(). So the driver has to check it itself. ++ */ ++ if (unlikely(netif_tx_queue_stopped(txq))) ++ goto drop; ++#endif ++ tnapi = &tp->napi[skb_get_queue_mapping(skb)]; ++ if (tg3_flag(tp, ENABLE_TSS)) ++ tnapi++; ++ ++ budget = tg3_tx_avail(tnapi); ++ ++ /* We are running in BH disabled context with netif_tx_lock ++ * and TX reclaim runs via tp->napi.poll inside of a software ++ * interrupt. Furthermore, IRQ processing runs lockless so we have ++ * no IRQ context deadlocks to worry about either. Rejoice! 
++ */ ++ if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { ++ if (!netif_tx_queue_stopped(txq)) { ++ netif_tx_stop_queue(txq); ++ ++ /* This is a hard error, log it. */ ++ netdev_err(dev, ++ "BUG! Tx Ring full when queue awake!\n"); ++ } ++ return NETDEV_TX_BUSY; ++ } ++ ++ entry = tnapi->tx_prod; ++ base_flags = 0; ++ if (skb->ip_summed == CHECKSUM_PARTIAL) ++ base_flags |= TXD_FLAG_TCPUDP_CSUM; ++ ++#if TG3_TSO_SUPPORT != 0 ++ mss = skb_shinfo(skb)->gso_size; ++ if (mss) { ++ u32 tcp_opt_len, hdr_len; ++ ++ if (skb_header_cloned(skb) && ++ pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) ++ goto drop; ++ ++ iph = ip_hdr(skb); ++ tcp_opt_len = tcp_optlen(skb); ++ ++ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; ++ ++ if (!skb_is_gso_v6(skb)) { ++ if (unlikely((ETH_HLEN + hdr_len) > 80) && ++ tg3_flag(tp, TSO_BUG)) ++ return tg3_tso_bug(tp, tnapi, txq, skb); ++ ++ ip_csum = iph->check; ++ ip_tot_len = iph->tot_len; ++ iph->check = 0; ++ iph->tot_len = htons(mss + hdr_len); ++ } ++ ++ if (hdr_len + mss >= skb->len - ETH_HLEN) { ++ mss = 0; ++ goto abort_lso; ++ } ++ ++ base_flags |= (TXD_FLAG_CPU_PRE_DMA | ++ TXD_FLAG_CPU_POST_DMA); ++ ++ tcph = tcp_hdr(skb); ++ tcp_csum = tcph->check; ++ ++ if (tg3_flag(tp, HW_TSO_1) || ++ tg3_flag(tp, HW_TSO_2) || ++ tg3_flag(tp, HW_TSO_3)) { ++ tcph->check = 0; ++ base_flags &= ~TXD_FLAG_TCPUDP_CSUM; ++ } else { ++ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, ++ 0, IPPROTO_TCP, 0); ++ } ++ ++ if (tg3_flag(tp, HW_TSO_3)) { ++ mss |= (hdr_len & 0xc) << 12; ++ if (hdr_len & 0x10) ++ base_flags |= 0x00000010; ++ base_flags |= (hdr_len & 0x3e0) << 5; ++ } else if (tg3_flag(tp, HW_TSO_2)) ++ mss |= hdr_len << 9; ++ else if (tg3_flag(tp, HW_TSO_1) || ++ tg3_asic_rev(tp) == ASIC_REV_5705) { ++ if (tcp_opt_len || iph->ihl > 5) { ++ int tsflags; ++ ++ tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); ++ mss |= (tsflags << 11); ++ } ++ } else { ++ if (tcp_opt_len || iph->ihl > 5) { ++ int tsflags; ++ ++ tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); ++ base_flags |= tsflags << 12; ++ } ++ } ++ } ++abort_lso: ++#else ++ mss = 0; ++#endif ++ ++ if (tg3_flag(tp, USE_JUMBO_BDFLAG) && ++ !mss && skb->len > VLAN_ETH_FRAME_LEN) ++ base_flags |= TXD_FLAG_JMB_PKT; ++ ++#ifdef BCM_KERNEL_SUPPORTS_8021Q ++ if (vlan_tx_tag_present(skb)) { ++ base_flags |= TXD_FLAG_VLAN; ++ vlan = vlan_tx_tag_get(skb); ++ } ++#endif ++ ++#ifdef BCM_KERNEL_SUPPORTS_TIMESTAMPING ++ if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && ++ tg3_flag(tp, TX_TSTAMP_EN)) { ++ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; ++ base_flags |= TXD_FLAG_HWTSTAMP; ++ } ++#endif /* BCM_KERNEL_SUPPORTS_TIMESTAMPING */ ++ ++ len = skb_headlen(skb); ++ ++ mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); ++ if (pci_dma_mapping_error_(tp->pdev, mapping)) ++ goto drop; ++ ++ ++ tnapi->tx_buffers[entry].skb = skb; ++ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); ++ ++ would_hit_hwbug = 0; ++ ++ if (tg3_flag(tp, 5701_DMA_BUG)) ++ would_hit_hwbug = 1; ++ ++ if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | ++ ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), ++ mss, vlan)) { ++ would_hit_hwbug = 1; ++ } else if (skb_shinfo(skb)->nr_frags > 0) { ++ u32 tmp_mss = mss; ++ ++ if (!tg3_flag(tp, HW_TSO_1) && ++ !tg3_flag(tp, HW_TSO_2) && ++ !tg3_flag(tp, HW_TSO_3)) ++ tmp_mss = 0; ++ ++ /* Now loop through additional data ++ * fragments, and queue them. 
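++		 * Each fragment gets its own BD (or several, if
++		 * tg3_tx_frag_set() has to split it); only the last BD of the
++		 * packet carries TXD_FLAG_END.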
++		 */
++		last = skb_shinfo(skb)->nr_frags - 1;
++		for (i = 0; i <= last; i++) {
++			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++
++			len = skb_frag_size(frag);
++			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
++						   len, DMA_TO_DEVICE);
++
++			tnapi->tx_buffers[entry].skb = NULL;
++			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
++					   mapping);
++			if (dma_mapping_error_(&tp->pdev->dev, mapping))
++				goto dma_error;
++
++			if (!budget ||
++			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
++					    len, base_flags |
++					    ((i == last) ? TXD_FLAG_END : 0),
++					    tmp_mss, vlan)) {
++				would_hit_hwbug = 1;
++				break;
++			}
++		}
++	}
++
++	if (would_hit_hwbug) {
++		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
++
++#if !defined(__VMKLNX__)
++		if (mss) {
++			/* If it's a TSO packet, do GSO instead of
++			 * allocating and copying to a large linear SKB.
++			 */
++			if (ip_tot_len) {
++				iph->check = ip_csum;
++				iph->tot_len = ip_tot_len;
++			}
++			tcph->check = tcp_csum;
++			return tg3_tso_bug(tp, tnapi, txq, skb);
++		}
++#endif
++
++		/* If the workaround fails due to memory/mapping
++		 * failure, silently drop this packet.
++		 */
++		entry = tnapi->tx_prod;
++		budget = tg3_tx_avail(tnapi);
++		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
++						base_flags, mss, vlan))
++			goto drop_nofree;
++	}
++
++	skb_tx_timestamp(skb);
++	netdev_tx_sent_queue(txq, skb->len);
++
++	/* Sync BD data before updating mailbox */
++	wmb();
++
++	/* Packets are ready, update Tx producer idx locally and on card. */
++	tw32_tx_mbox(tnapi->prodmbox, entry);
++
++	tnapi->tx_prod = entry;
++	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
++		netif_tx_stop_queue(txq);
++
++		/* netif_tx_stop_queue() must be done before checking
++		 * tx index in tg3_tx_avail() below, because in
++		 * tg3_tx(), we update tx index before checking for
++		 * netif_tx_queue_stopped().
++ */ ++ smp_mb(); ++ if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) ++ netif_tx_wake_queue(txq); ++ } ++ ++ mmiowb(); ++ ++ tg3_update_trans_start(dev); ++ ++ return NETDEV_TX_OK; ++ ++dma_error: ++ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); ++ tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; ++drop: ++ dev_kfree_skb(skb); ++drop_nofree: ++ tp->tx_dropped++; ++ return NETDEV_TX_OK; ++} ++ ++static void tg3_mac_loopback(struct tg3 *tp, bool enable) ++{ ++ if (enable) { ++ tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | ++ MAC_MODE_PORT_MODE_MASK); ++ ++ tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; ++ ++ if (!tg3_flag(tp, 5705_PLUS)) ++ tp->mac_mode |= MAC_MODE_LINK_POLARITY; ++ ++ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) ++ tp->mac_mode |= MAC_MODE_PORT_MODE_MII; ++ else ++ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; ++ } else { ++ tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; ++ ++ if (tg3_flag(tp, 5705_PLUS) || ++ (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || ++ tg3_asic_rev(tp) == ASIC_REV_5700) ++ tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; ++ } ++ ++ tw32(MAC_MODE, tp->mac_mode); ++ udelay(40); ++} ++ ++static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) ++{ ++ u32 val, bmcr, mac_mode, ptest = 0; ++ ++ tg3_phy_toggle_apd(tp, false); ++ tg3_phy_toggle_automdix(tp, false); ++ ++ if (extlpbk && tg3_phy_set_extloopbk(tp)) ++ return -EIO; ++ ++ bmcr = BMCR_FULLDPLX; ++ switch (speed) { ++ case SPEED_10: ++ break; ++ case SPEED_100: ++ bmcr |= BMCR_SPEED100; ++ break; ++ case SPEED_1000: ++ default: ++ if (tp->phy_flags & TG3_PHYFLG_IS_FET) { ++ speed = SPEED_100; ++ bmcr |= BMCR_SPEED100; ++ } else { ++ speed = SPEED_1000; ++ bmcr |= BMCR_SPEED1000; ++ } ++ } ++ ++ if (extlpbk) { ++ if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { ++ tg3_readphy(tp, MII_CTRL1000, &val); ++ val |= CTL1000_AS_MASTER | ++ CTL1000_ENABLE_MASTER; ++ tg3_writephy(tp, MII_CTRL1000, val); ++ } else { ++ ptest = MII_TG3_FET_PTEST_TRIM_SEL | ++ MII_TG3_FET_PTEST_TRIM_2; ++ tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); ++ } ++ } else ++ bmcr |= BMCR_LOOPBACK; ++ ++ tg3_writephy(tp, MII_BMCR, bmcr); ++ ++ /* The write needs to be flushed for the FETs */ ++ if (tp->phy_flags & TG3_PHYFLG_IS_FET) ++ tg3_readphy(tp, MII_BMCR, &bmcr); ++ ++ udelay(40); ++ ++ if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && ++ tg3_asic_rev(tp) == ASIC_REV_5785) { ++ tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | ++ MII_TG3_FET_PTEST_FRC_TX_LINK | ++ MII_TG3_FET_PTEST_FRC_TX_LOCK); ++ ++ /* The write needs to be flushed for the AC131 */ ++ tg3_readphy(tp, MII_TG3_FET_PTEST, &val); ++ } ++ ++ /* Reset to prevent losing 1st rx packet intermittently */ ++ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && ++ tg3_flag(tp, 5780_CLASS)) { ++ tw32_f(MAC_RX_MODE, RX_MODE_RESET); ++ udelay(10); ++ tw32_f(MAC_RX_MODE, tp->rx_mode); ++ } ++ ++ mac_mode = tp->mac_mode & ++ ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); ++ if (speed == SPEED_1000) ++ mac_mode |= MAC_MODE_PORT_MODE_GMII; ++ else ++ mac_mode |= MAC_MODE_PORT_MODE_MII; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5700) { ++ u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; ++ ++ if (masked_phy_id == TG3_PHY_ID_BCM5401) ++ mac_mode &= ~MAC_MODE_LINK_POLARITY; ++ else if (masked_phy_id == TG3_PHY_ID_BCM5411) ++ mac_mode |= MAC_MODE_LINK_POLARITY; ++ ++ tg3_writephy(tp, MII_TG3_EXT_CTRL, ++ MII_TG3_EXT_CTRL_LNK3_LED_MODE); ++ } ++ ++ tw32(MAC_MODE, mac_mode); ++ udelay(40); ++ ++ return 0; ++} ++ ++#ifdef BCM_HAS_FIX_FEATURES ++static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) 
++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ if (features & NETIF_F_LOOPBACK) { ++ if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) ++ return; ++ ++ spin_lock_bh(&tp->lock); ++ tg3_mac_loopback(tp, true); ++ netif_carrier_on(tp->dev); ++ spin_unlock_bh(&tp->lock); ++ netdev_info(dev, "Internal MAC loopback mode enabled.\n"); ++ } else { ++ if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) ++ return; ++ ++ spin_lock_bh(&tp->lock); ++ tg3_mac_loopback(tp, false); ++ /* Force link status check */ ++ tg3_setup_phy(tp, true); ++ spin_unlock_bh(&tp->lock); ++ netdev_info(dev, "Internal MAC loopback mode disabled.\n"); ++ } ++} ++ ++#if defined(GET_NETDEV_OP_EXT) ++static u32 tg3_fix_features(struct net_device *dev, u32 features) ++#else ++static netdev_features_t tg3_fix_features(struct net_device *dev, ++ netdev_features_t features) ++#endif ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) ++ features &= ~NETIF_F_ALL_TSO; ++ ++ return features; ++} ++ ++#if defined(GET_NETDEV_OP_EXT) ++static int tg3_set_features(struct net_device *dev, u32 features) ++#else ++static int tg3_set_features(struct net_device *dev, netdev_features_t features) ++#endif ++{ ++ netdev_features_t changed = dev->features ^ features; ++ ++ if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) ++ tg3_set_loopback(dev, features); ++ ++ return 0; ++} ++#endif /* BCM_HAS_FIX_FEATURES */ ++ ++static void tg3_rx_prodring_free(struct tg3 *tp, ++ struct tg3_rx_prodring_set *tpr) ++{ ++ int i; ++ ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ if (tg3_flag(tp, ENABLE_RSS)) ++#endif ++ if (tpr != &tp->napi[0].prodring) { ++ for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; ++ i = (i + 1) & tp->rx_std_ring_mask) ++ tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], ++ tp->rx_pkt_map_sz); ++ ++ if (tg3_flag(tp, JUMBO_CAPABLE)) { ++ for (i = tpr->rx_jmb_cons_idx; ++ i != tpr->rx_jmb_prod_idx; ++ i = (i + 1) & tp->rx_jmb_ring_mask) { ++ tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], ++ TG3_RX_JMB_MAP_SZ); ++ } ++ } ++ ++ return; ++ } ++ ++ for (i = 0; i <= tp->rx_std_ring_mask; i++) ++ tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], ++ tp->rx_pkt_map_sz); ++ ++ if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { ++ for (i = 0; i <= tp->rx_jmb_ring_mask; i++) ++ tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], ++ TG3_RX_JMB_MAP_SZ); ++ } ++} ++ ++/* Initialize rx rings for packet processing. ++ * ++ * The chip has been shut down and the driver detached from ++ * the networking, so no interrupts or new tx packets will ++ * end up in the driver. tp->{tx,}lock are held and thus ++ * we may not sleep. ++ */ ++static int tg3_rx_prodring_alloc(struct tg3 *tp, ++ struct tg3_rx_prodring_set *tpr) ++{ ++ u32 i, rx_pkt_dma_sz; ++ ++ tpr->rx_std_cons_idx = 0; ++ tpr->rx_std_prod_idx = 0; ++ tpr->rx_jmb_cons_idx = 0; ++ tpr->rx_jmb_prod_idx = 0; ++ ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ if (tg3_flag(tp, ENABLE_RSS)) ++#endif ++ if (tpr != &tp->napi[0].prodring) { ++ memset(&tpr->rx_std_buffers[0], 0, ++ TG3_RX_STD_BUFF_RING_SIZE(tp)); ++ if (tpr->rx_jmb_buffers) ++ memset(&tpr->rx_jmb_buffers[0], 0, ++ TG3_RX_JMB_BUFF_RING_SIZE(tp)); ++ goto done; ++ } ++ ++ /* Zero out all descriptors. 
*/ ++ memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); ++ ++ rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; ++ if (tg3_flag(tp, 5780_CLASS) && ++ tp->dev->mtu > ETH_DATA_LEN) ++ rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; ++ tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); ++ ++ /* Initialize invariants of the rings, we only set this ++ * stuff once. This works because the card does not ++ * write into the rx buffer posting rings. ++ */ ++ for (i = 0; i <= tp->rx_std_ring_mask; i++) { ++ struct tg3_rx_buffer_desc *rxd; ++ ++ rxd = &tpr->rx_std[i]; ++ rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; ++ rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); ++ rxd->opaque = (RXD_OPAQUE_RING_STD | ++ (i << RXD_OPAQUE_INDEX_SHIFT)); ++ } ++ ++ /* Now allocate fresh SKBs for each rx ring. */ ++ for (i = 0; i < tp->rx_pending; i++) { ++ unsigned int frag_size; ++ ++ if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i, ++ &frag_size) < 0) { ++ netdev_warn(tp->dev, ++ "Using a smaller RX standard ring. Only " ++ "%d out of %d buffers were allocated " ++ "successfully\n", i, tp->rx_pending); ++ if (i == 0) ++ goto initfail; ++ tp->rx_pending = i; ++ break; ++ } ++ } ++ ++ if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) ++ goto done; ++ ++ memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); ++ ++ if (!tg3_flag(tp, JUMBO_RING_ENABLE)) ++ goto done; ++ ++ for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { ++ struct tg3_rx_buffer_desc *rxd; ++ ++ rxd = &tpr->rx_jmb[i].std; ++ rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; ++ rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | ++ RXD_FLAG_JUMBO; ++ rxd->opaque = (RXD_OPAQUE_RING_JUMBO | ++ (i << RXD_OPAQUE_INDEX_SHIFT)); ++ } ++ ++ for (i = 0; i < tp->rx_jumbo_pending; i++) { ++ unsigned int frag_size; ++ ++ if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i, ++ &frag_size) < 0) { ++ netdev_warn(tp->dev, ++ "Using a smaller RX jumbo ring. 
Only %d " ++ "out of %d buffers were allocated " ++ "successfully\n", i, tp->rx_jumbo_pending); ++ if (i == 0) ++ goto initfail; ++ tp->rx_jumbo_pending = i; ++ break; ++ } ++ } ++ ++done: ++ return 0; ++ ++initfail: ++ tg3_rx_prodring_free(tp, tpr); ++ return -ENOMEM; ++} ++ ++static void tg3_rx_prodring_fini(struct tg3 *tp, ++ struct tg3_rx_prodring_set *tpr) ++{ ++ kfree(tpr->rx_std_buffers); ++ tpr->rx_std_buffers = NULL; ++ kfree(tpr->rx_jmb_buffers); ++ tpr->rx_jmb_buffers = NULL; ++ if (tpr->rx_std) { ++ dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), ++ tpr->rx_std, tpr->rx_std_mapping); ++ tpr->rx_std = NULL; ++ } ++ if (tpr->rx_jmb) { ++ dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), ++ tpr->rx_jmb, tpr->rx_jmb_mapping); ++ tpr->rx_jmb = NULL; ++ } ++} ++ ++static int tg3_rx_prodring_init(struct tg3 *tp, ++ struct tg3_rx_prodring_set *tpr) ++{ ++ tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), ++ GFP_KERNEL); ++ if (!tpr->rx_std_buffers) ++ return -ENOMEM; ++ ++ tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, ++ TG3_RX_STD_RING_BYTES(tp), ++ &tpr->rx_std_mapping, ++ GFP_KERNEL); ++ if (!tpr->rx_std) ++ goto err_out; ++ ++ if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { ++ tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), ++ GFP_KERNEL); ++ if (!tpr->rx_jmb_buffers) ++ goto err_out; ++ ++ tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, ++ TG3_RX_JMB_RING_BYTES(tp), ++ &tpr->rx_jmb_mapping, ++ GFP_KERNEL); ++ if (!tpr->rx_jmb) ++ goto err_out; ++ } ++ ++ return 0; ++ ++err_out: ++ tg3_rx_prodring_fini(tp, tpr); ++ return -ENOMEM; ++} ++ ++/* Free up pending packets in all rx/tx rings. ++ * ++ * The chip has been shut down and the driver detached from ++ * the networking, so no interrupts or new tx packets will ++ * end up in the driver. tp->{tx,}lock is not held and we are not ++ * in an interrupt context and thus may sleep. ++ */ ++static void tg3_free_rings(struct tg3 *tp) ++{ ++ int i, j; ++ ++ for (j = 0; j < tp->irq_cnt; j++) { ++ struct tg3_napi *tnapi = &tp->napi[j]; ++ ++ tg3_rx_prodring_free(tp, &tnapi->prodring); ++ ++ if (!tnapi->tx_buffers) ++ continue; ++ ++ for (i = 0; i < TG3_TX_RING_SIZE; i++) { ++ struct sk_buff *skb = tnapi->tx_buffers[i].skb; ++ ++ if (!skb) ++ continue; ++ ++ tg3_tx_skb_unmap(tnapi, i, ++ skb_shinfo(skb)->nr_frags - 1); ++ ++ dev_kfree_skb_any(skb); ++ } ++ netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j)); ++ } ++} ++ ++/* Initialize tx/rx rings for packet processing. ++ * ++ * The chip has been shut down and the driver detached from ++ * the networking, so no interrupts or new tx packets will ++ * end up in the driver. tp->{tx,}lock are held and thus ++ * we may not sleep. ++ */ ++static int tg3_init_rings(struct tg3 *tp) ++{ ++ int i; ++ ++ /* Free up all the SKBs. 
++	 */
++	tg3_free_rings(tp);
++
++	for (i = 0; i < tp->irq_cnt; i++) {
++		struct tg3_napi *tnapi = &tp->napi[i];
++
++		tnapi->last_tag = 0;
++		tnapi->last_irq_tag = 0;
++		tnapi->hw_status->status = 0;
++		tnapi->hw_status->status_tag = 0;
++		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
++
++		tnapi->tx_prod = 0;
++		tnapi->tx_cons = 0;
++		if (tnapi->tx_ring)
++			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
++
++		tnapi->rx_rcb_ptr = 0;
++		if (tnapi->rx_rcb)
++			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++		if (!i || (i && tg3_flag(tp, ENABLE_RSS)))
++#endif
++		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
++			tg3_free_rings(tp);
++			return -ENOMEM;
++		}
++	}
++
++	return 0;
++}
++
++static void tg3_mem_tx_release(struct tg3 *tp)
++{
++	int i;
++
++	for (i = 0; i < tp->irq_max; i++) {
++		struct tg3_napi *tnapi = &tp->napi[i];
++
++		if (tnapi->tx_ring) {
++			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
++					  tnapi->tx_ring, tnapi->tx_desc_mapping);
++			tnapi->tx_ring = NULL;
++		}
++
++		kfree(tnapi->tx_buffers);
++		tnapi->tx_buffers = NULL;
++	}
++}
++
++static int tg3_mem_tx_acquire(struct tg3 *tp)
++{
++	int i;
++	struct tg3_napi *tnapi = &tp->napi[0];
++
++	/* If multivector TSS is enabled, vector 0 does not handle
++	 * tx interrupts. Don't allocate any resources for it.
++	 */
++	if (tg3_flag(tp, ENABLE_TSS))
++		tnapi++;
++
++	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
++		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
++					    TG3_TX_RING_SIZE, GFP_KERNEL);
++		if (!tnapi->tx_buffers)
++			goto err_out;
++
++		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
++						    TG3_TX_RING_BYTES,
++						    &tnapi->tx_desc_mapping,
++						    GFP_KERNEL);
++		if (!tnapi->tx_ring)
++			goto err_out;
++	}
++
++	return 0;
++
++err_out:
++	tg3_mem_tx_release(tp);
++	return -ENOMEM;
++}
++
++static void tg3_mem_rx_release(struct tg3 *tp)
++{
++	int i;
++
++	for (i = 0; i < tp->irq_max; i++) {
++		struct tg3_napi *tnapi = &tp->napi[i];
++
++		tg3_rx_prodring_fini(tp, &tnapi->prodring);
++
++		if (!tnapi->rx_rcb)
++			continue;
++
++		dma_free_coherent(&tp->pdev->dev,
++				  TG3_RX_RCB_RING_BYTES(tp),
++				  tnapi->rx_rcb,
++				  tnapi->rx_rcb_mapping);
++		tnapi->rx_rcb = NULL;
++	}
++}
++
++static int tg3_mem_rx_acquire(struct tg3 *tp)
++{
++	unsigned int i, limit;
++
++	limit = tp->rxq_cnt;
++
++	/* If RSS is enabled, we need a (dummy) producer ring
++	 * set on vector zero. This is the true hw prodring.
++	 */
++	if (tg3_flag(tp, ENABLE_RSS))
++		limit++;
++
++	for (i = 0; i < limit; i++) {
++		struct tg3_napi *tnapi = &tp->napi[i];
++
++		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
++			goto err_out;
++
++		if (tg3_flag(tp, ENABLE_IOV))
++			tnapi->srcprodring = &tnapi->prodring;
++		else
++			tnapi->srcprodring = &tp->napi[0].prodring;
++
++		/* If multivector RSS is enabled, vector 0
++		 * does not handle rx or tx interrupts.
++		 * Don't allocate any resources for it.
++		 */
++		if (!i && tg3_flag(tp, ENABLE_RSS))
++			continue;
++
++		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
++						    TG3_RX_RCB_RING_BYTES(tp),
++						    &tnapi->rx_rcb_mapping,
++						    GFP_KERNEL);
++		if (!tnapi->rx_rcb)
++			goto err_out;
++	}
++
++	return 0;
++
++err_out:
++	tg3_mem_rx_release(tp);
++	return -ENOMEM;
++}
++
++/*
++ * Must not be invoked with interrupt sources disabled and
++ * the hardware shut down.
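++ * Releases the per-vector status blocks, all rx/tx ring memory and the
++ * hardware statistics block.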
++ */
++static void tg3_free_consistent(struct tg3 *tp)
++{
++	int i;
++
++	for (i = 0; i < tp->irq_cnt; i++) {
++		struct tg3_napi *tnapi = &tp->napi[i];
++
++		if (tnapi->hw_status) {
++			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
++					  tnapi->hw_status,
++					  tnapi->status_mapping);
++			tnapi->hw_status = NULL;
++		}
++	}
++
++	tg3_mem_rx_release(tp);
++	tg3_mem_tx_release(tp);
++
++	if (tp->hw_stats) {
++		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
++				  tp->hw_stats, tp->stats_mapping);
++		tp->hw_stats = NULL;
++	}
++}
++
++/*
++ * Must not be invoked with interrupt sources disabled and
++ * the hardware shut down. Can sleep.
++ */
++static int tg3_alloc_consistent(struct tg3 *tp)
++{
++	int i;
++
++	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
++					   sizeof(struct tg3_hw_stats),
++					   &tp->stats_mapping, GFP_KERNEL);
++	if (!tp->hw_stats)
++		goto err_out;
++
++	for (i = 0; i < tp->irq_cnt; i++) {
++		struct tg3_napi *tnapi = &tp->napi[i];
++		struct tg3_hw_status *sblk;
++
++		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
++						       TG3_HW_STATUS_SIZE,
++						       &tnapi->status_mapping,
++						       GFP_KERNEL);
++		if (!tnapi->hw_status)
++			goto err_out;
++
++		sblk = tnapi->hw_status;
++
++		if (tg3_flag(tp, ENABLE_RSS)) {
++			volatile u16 *prodptr = NULL;
++
++			/* When RSS is enabled, the status block format changes
++			 * slightly. The "rx_jumbo_consumer", "reserved",
++			 * and "rx_mini_consumer" members get mapped to the
++			 * other three rx return ring producer indexes.
++			 */
++			switch (i) {
++			case 1:
++				prodptr = &sblk->idx[0].rx_producer;
++				break;
++			case 2:
++				prodptr = &sblk->rx_jumbo_consumer;
++				break;
++			case 3:
++				prodptr = &sblk->reserved;
++				break;
++			case 4:
++				prodptr = &sblk->rx_mini_consumer;
++				break;
++			}
++			tnapi->rx_rcb_prod_idx = prodptr;
++		} else
++			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
++	}
++
++	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
++		goto err_out;
++
++	return 0;
++
++err_out:
++	tg3_free_consistent(tp);
++	return -ENOMEM;
++}
++
++#define MAX_WAIT_CNT 1000
++
++/* To stop a block, clear the enable bit and poll till it
++ * clears. tp->lock is held.
++ */
++static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
++{
++	unsigned int i;
++	u32 val;
++
++	if (tg3_flag(tp, 5705_PLUS)) {
++		switch (ofs) {
++		case RCVLSC_MODE:
++		case DMAC_MODE:
++		case MBFREE_MODE:
++		case BUFMGR_MODE:
++		case MEMARB_MODE:
++			/* We can't enable/disable these bits of the
++			 * 5705/5750, just say success.
++			 */
++			return 0;
++
++		default:
++			break;
++		}
++	}
++
++	val = tr32(ofs);
++	val &= ~enable_bit;
++	tw32_f(ofs, val);
++
++	for (i = 0; i < MAX_WAIT_CNT; i++) {
++		if (pci_channel_offline(tp->pdev)) {
++			dev_err(&tp->pdev->dev,
++				"tg3_stop_block device offline, "
++				"ofs=%lx enable_bit=%x\n",
++				ofs, enable_bit);
++			return -ENODEV;
++		}
++
++		udelay(100);
++		val = tr32(ofs);
++		if ((val & enable_bit) == 0)
++			break;
++	}
++
++	if (i == MAX_WAIT_CNT && !silent) {
++		dev_err(&tp->pdev->dev,
++			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
++			ofs, enable_bit);
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
++/* tp->lock is held.
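++ * tg3_abort_hw() quiesces the receive and transmit engines block by
++ * block via tg3_stop_block() before the chip is reset.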
*/ ++static int tg3_abort_hw(struct tg3 *tp, bool silent) ++{ ++ int i, err; ++ ++ tg3_disable_ints(tp); ++ ++ if (pci_channel_offline(tp->pdev)) { ++ tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE); ++ tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; ++ err = -ENODEV; ++ goto err_no_dev; ++ } ++ ++ tp->rx_mode &= ~RX_MODE_ENABLE; ++ tw32_f(MAC_RX_MODE, tp->rx_mode); ++ udelay(10); ++ ++ err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); ++ err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); ++ err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); ++ err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); ++ err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); ++ err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); ++ ++ err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); ++ err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); ++ err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); ++ err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); ++ err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); ++ err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); ++ err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); ++ ++ tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; ++ tw32_f(MAC_MODE, tp->mac_mode); ++ udelay(40); ++ ++ tp->tx_mode &= ~TX_MODE_ENABLE; ++ tw32_f(MAC_TX_MODE, tp->tx_mode); ++ ++ for (i = 0; i < MAX_WAIT_CNT; i++) { ++ udelay(100); ++ if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) ++ break; ++ } ++ if (i >= MAX_WAIT_CNT) { ++ dev_err(&tp->pdev->dev, ++ "%s timed out, TX_MODE_ENABLE will not clear " ++ "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); ++ err |= -ENODEV; ++ } ++ ++ err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); ++ err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); ++ err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); ++ ++ tw32(FTQ_RESET, 0xffffffff); ++ tw32(FTQ_RESET, 0x00000000); ++ ++ err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); ++ err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); ++ ++err_no_dev: ++ for (i = 0; i < tp->irq_cnt; i++) { ++ struct tg3_napi *tnapi = &tp->napi[i]; ++ if (tnapi->hw_status) ++ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); ++ } ++ ++ return err; ++} ++ ++/* Save PCI command register before chip reset */ ++static void tg3_save_pci_state(struct tg3 *tp) ++{ ++ pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); ++} ++ ++/* Restore PCI state after chip reset */ ++static void tg3_restore_pci_state(struct tg3 *tp) ++{ ++ u32 val; ++ ++ /* Re-enable indirect register accesses. */ ++ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, ++ tp->misc_host_ctrl); ++ ++ /* Set MAX PCI retry to zero. */ ++ val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && ++ tg3_flag(tp, PCIX_MODE)) ++ val |= PCISTATE_RETRY_SAME_DMA; ++ /* Allow reads and writes to the APE register and memory space. 
*/ ++ if (tg3_flag(tp, ENABLE_APE)) ++ val |= PCISTATE_ALLOW_APE_CTLSPC_WR | ++ PCISTATE_ALLOW_APE_SHMEM_WR | ++ PCISTATE_ALLOW_APE_PSPACE_WR; ++ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); ++ ++ pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); ++ ++ if (!tg3_flag(tp, PCI_EXPRESS)) { ++ pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, ++ tp->pci_cacheline_sz); ++ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, ++ tp->pci_lat_timer); ++ } ++ ++ /* Make sure PCI-X relaxed ordering bit is clear. */ ++ if (tg3_flag(tp, PCIX_MODE)) { ++ u16 pcix_cmd; ++ ++ pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, ++ &pcix_cmd); ++ pcix_cmd &= ~PCI_X_CMD_ERO; ++ pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, ++ pcix_cmd); ++ } ++ ++ if (tg3_flag(tp, 5780_CLASS)) { ++ ++ /* Chip reset on 5780 will reset MSI enable bit, ++ * so need to restore it. ++ */ ++ if (tg3_flag(tp, USING_MSI)) { ++ u16 ctrl; ++ ++ pci_read_config_word(tp->pdev, ++ tp->msi_cap + PCI_MSI_FLAGS, ++ &ctrl); ++ pci_write_config_word(tp->pdev, ++ tp->msi_cap + PCI_MSI_FLAGS, ++ ctrl | PCI_MSI_FLAGS_ENABLE); ++ val = tr32(MSGINT_MODE); ++ tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); ++ } ++ } ++ ++ tg3_disable_ints(tp); ++} ++ ++static void tg3_override_clk(struct tg3 *tp) ++{ ++ u32 val; ++ ++ switch (tg3_asic_rev(tp)) { ++ case ASIC_REV_5717: ++ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); ++ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | ++ TG3_CPMU_MAC_ORIDE_ENABLE); ++ break; ++ ++ case ASIC_REV_5719: ++ case ASIC_REV_5720: ++ tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN); ++ break; ++ ++ default: ++ return; ++ } ++} ++ ++static void tg3_restore_clk(struct tg3 *tp) ++{ ++ u32 val; ++ ++ switch (tg3_asic_rev(tp)) { ++ case ASIC_REV_5717: ++ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); ++ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, ++ val & ~TG3_CPMU_MAC_ORIDE_ENABLE); ++ break; ++ ++ case ASIC_REV_5719: ++ case ASIC_REV_5720: ++ val = tr32(TG3_CPMU_CLCK_ORIDE); ++ tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); ++ break; ++ ++ default: ++ return; ++ } ++} ++ ++/* tp->lock is held. */ ++static int tg3_chip_reset(struct tg3 *tp) ++{ ++ u32 val; ++ void (*write_op)(struct tg3 *, u32, u32); ++ int i, err; ++ ++ if (!pci_device_is_present(tp->pdev)) ++ return -ENODEV; ++ ++ tg3_nvram_lock(tp); ++ ++ tg3_ape_lock(tp, TG3_APE_LOCK_GRC); ++ ++ /* No matching tg3_nvram_unlock() after this because ++ * chip reset below will undo the nvram lock. ++ */ ++ tp->nvram_lock_cnt = 0; ++ ++ /* GRC_MISC_CFG core clock reset will clear the memory ++ * enable bit in PCI register 4 and the MSI enable bit ++ * on some chips, so we save relevant registers here. ++ */ ++ tg3_save_pci_state(tp); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5752 || ++ tg3_flag(tp, 5755_PLUS)) ++ tw32(GRC_FASTBOOT_PC, 0); ++ ++ /* ++ * We must avoid the readl() that normally takes place. ++ * It locks machines, causes machine checks, and other ++ * fun things. So, temporarily disable the 5701 ++ * hardware workaround, while we do the reset. ++ */ ++ write_op = tp->write32; ++ if (write_op == tg3_write_flush_reg32) ++ tp->write32 = tg3_write32; ++ ++ /* Prevent the irq handler from reading or writing PCI registers ++ * during chip reset when the memory enable bit in the PCI command ++ * register may be cleared. The chip does not generate interrupt ++ * at this time, but the irq handler may still be called due to irq ++ * sharing or irqpoll. 
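++	 * The CHIP_RESETTING flag set below makes the handler bail out
++	 * without touching the hardware until the reset has completed.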
++ */ ++ tg3_flag_set(tp, CHIP_RESETTING); ++ for (i = 0; i < tp->irq_cnt; i++) { ++ struct tg3_napi *tnapi = &tp->napi[i]; ++ if (tnapi->hw_status) { ++ tnapi->hw_status->status = 0; ++ tnapi->hw_status->status_tag = 0; ++ } ++ tnapi->last_tag = 0; ++ tnapi->last_irq_tag = 0; ++ } ++ smp_mb(); ++ ++#if (LINUX_VERSION_CODE >= 0x2051c) ++ for (i = 0; i < tp->irq_cnt; i++) ++ synchronize_irq(tp->napi[i].irq_vec); ++#else ++ synchronize_irq(); ++#endif ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_57780) { ++ val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; ++ tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); ++ } ++ ++ /* do the reset */ ++ val = GRC_MISC_CFG_CORECLK_RESET; ++ ++ if (tg3_flag(tp, PCI_EXPRESS)) { ++ /* Force PCIe 1.0a mode */ ++ if (tg3_asic_rev(tp) != ASIC_REV_5785 && ++ !tg3_flag(tp, 57765_PLUS) && ++ tr32(TG3_PCIE_PHY_TSTCTL) == ++ (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) ++ tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); ++ ++ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) { ++ tw32(GRC_MISC_CFG, (1 << 29)); ++ val |= (1 << 29); ++ } ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5906) { ++ tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); ++ tw32(GRC_VCPU_EXT_CTRL, ++ tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); ++ } ++ ++ /* Set the clock to the highest frequency to avoid timeouts. With link ++ * aware mode, the clock speed could be slow and bootcode does not ++ * complete within the expected time. Override the clock to allow the ++ * bootcode to finish sooner and then restore it. A later bootcode will ++ * implement this workaround at which time this change must be removed ++ * from the driver. ++ */ ++ tg3_override_clk(tp); ++ ++ /* Manage gphy power for all CPMU absent PCIe devices. */ ++ if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) ++ val |= GRC_MISC_CFG_KEEP_GPHY_POWER; ++ ++ tw32(GRC_MISC_CFG, val); ++ ++ /* restore 5701 hardware bug workaround write method */ ++ tp->write32 = write_op; ++ ++ /* Unfortunately, we have to delay before the PCI read back. ++ * Some 575X chips even will not respond to a PCI cfg access ++ * when the reset command is given to the chip. ++ * ++ * How do these hardware designers expect things to work ++ * properly if the PCI write is posted for a long period ++ * of time? It is always necessary to have some method by ++ * which a register read back can occur to push the write ++ * out which does the reset. ++ * ++ * For most tg3 variants the trick below was working. ++ * Ho hum... ++ */ ++ udelay(120); ++ ++ /* Flush PCI posted writes. The normal MMIO registers ++ * are inaccessible at this time so this is the only ++ * way to make this reliably (actually, this is no longer ++ * the case, see above). I tried to use indirect ++ * register read/write but this upset some 5701 variants. ++ */ ++ pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); ++ ++ udelay(120); ++ ++ if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) { ++ u16 val16; ++ ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) { ++ int j; ++ u32 cfg_val; ++ ++ /* Wait for link training to complete. */ ++ for (j = 0; j < 5000; j++) ++ udelay(100); ++ ++ pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); ++ pci_write_config_dword(tp->pdev, 0xc4, ++ cfg_val | (1 << 15)); ++ } ++ ++ /* Clear the "no snoop" and "relaxed ordering" bits. */ ++ val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN; ++ /* ++ * Older PCIe devices only support the 128 byte ++ * MPS setting. Enforce the restriction. 
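++		 * (Clearing every PCI_EXP_DEVCTL_PAYLOAD bit selects the
++		 * 128-byte encoding.)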
++ */ ++ if (!tg3_flag(tp, CPMU_PRESENT)) ++ val16 |= PCI_EXP_DEVCTL_PAYLOAD; ++ pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16); ++ ++ /* Clear error status */ ++ pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA, ++ PCI_EXP_DEVSTA_CED | ++ PCI_EXP_DEVSTA_NFED | ++ PCI_EXP_DEVSTA_FED | ++ PCI_EXP_DEVSTA_URD); ++ } ++ ++ tg3_restore_pci_state(tp); ++ ++ tg3_flag_clear(tp, CHIP_RESETTING); ++ tg3_flag_clear(tp, ERROR_PROCESSED); ++ ++ val = 0; ++ if (tg3_flag(tp, 5780_CLASS)) ++ val = tr32(MEMARB_MODE); ++ tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); ++ ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) { ++ tg3_stop_fw(tp); ++ tw32(0x5000, 0x400); ++ } ++ ++ if (tg3_flag(tp, IS_SSB_CORE)) { ++ /* ++ * BCM4785: In order to avoid repercussions from using ++ * potentially defective internal ROM, stop the Rx RISC CPU, ++ * which is not required. ++ */ ++ tg3_stop_fw(tp); ++ tg3_halt_cpu(tp, RX_CPU_BASE); ++ } ++ ++ err = tg3_poll_fw(tp); ++ if (err) ++ return err; ++ ++ tw32(GRC_MODE, tp->grc_mode); ++ ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) { ++ val = tr32(0xc4); ++ ++ tw32(0xc4, val | (1 << 15)); ++ } ++ ++ if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && ++ tg3_asic_rev(tp) == ASIC_REV_5705) { ++ tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) ++ tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; ++ tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); ++ } ++ ++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { ++ tp->mac_mode = MAC_MODE_PORT_MODE_TBI; ++ val = tp->mac_mode; ++ } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { ++ tp->mac_mode = MAC_MODE_PORT_MODE_GMII; ++ val = tp->mac_mode; ++ } else ++ val = 0; ++ ++ tw32_f(MAC_MODE, val); ++ udelay(40); ++ ++ tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); ++ ++ tg3_mdio_start(tp); ++ ++ if (tg3_flag(tp, PCI_EXPRESS) && ++ tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && ++ tg3_asic_rev(tp) != ASIC_REV_5785 && ++ !tg3_flag(tp, 57765_PLUS)) { ++ val = tr32(0x7c00); ++ ++ tw32(0x7c00, val | (1 << 25)); ++ } ++ ++ tg3_restore_clk(tp); ++ ++ /* Reprobe ASF enable state. */ ++ tg3_flag_clear(tp, ENABLE_ASF); ++ tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | ++ TG3_PHYFLG_KEEP_LINK_ON_PWRDN); ++ ++ tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); ++ tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); ++ if (val == NIC_SRAM_DATA_SIG_MAGIC) { ++ u32 nic_cfg; ++ ++ tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); ++ if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { ++ tg3_flag_set(tp, ENABLE_ASF); ++ tp->last_event_jiffies = jiffies; ++ if (tg3_flag(tp, 5750_PLUS)) ++ tg3_flag_set(tp, ASF_NEW_HANDSHAKE); ++ ++ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg); ++ if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK) ++ tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; ++ if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID) ++ tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; ++ } ++ } ++ ++ return 0; ++} ++ ++static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *); ++static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *); ++static void __tg3_set_rx_mode(struct net_device *); ++ ++/* tp->lock is held. */ ++static int tg3_halt(struct tg3 *tp, int kind, bool silent) ++{ ++ int err; ++ ++ tg3_stop_fw(tp); ++ ++ tg3_write_sig_pre_reset(tp, kind); ++ ++ tg3_abort_hw(tp, silent); ++ err = tg3_chip_reset(tp); ++ ++ __tg3_set_mac_addr(tp, false); ++ ++ tg3_write_sig_legacy(tp, kind); ++ tg3_write_sig_post_reset(tp, kind); ++ ++ if (tp->hw_stats) { ++ /* Save the stats across chip resets... 
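++		 * The snapshots taken here are folded back into the totals
++		 * that tg3_get_nstats() and tg3_get_estats() report, so the
++		 * counters stay monotonic across the reset.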
*/ ++ tg3_get_nstats(tp, &tp->net_stats_prev); ++ tg3_get_estats(tp, &tp->estats_prev); ++ ++ /* And make sure the next sample is new data */ ++ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); ++ } ++ ++ return err; ++} ++ ++static int tg3_set_mac_addr(struct net_device *dev, void *p) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ struct sockaddr *addr = p; ++ int err = 0; ++ bool skip_mac_1 = false; ++ ++ if (!is_valid_ether_addr(addr->sa_data)) ++ return -EADDRNOTAVAIL; ++ ++ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); ++ ++ if (!netif_running(dev)) ++ return 0; ++ ++ if (tg3_flag(tp, ENABLE_ASF)) { ++ u32 addr0_high, addr0_low, addr1_high, addr1_low; ++ ++ addr0_high = tr32(MAC_ADDR_0_HIGH); ++ addr0_low = tr32(MAC_ADDR_0_LOW); ++ addr1_high = tr32(MAC_ADDR_1_HIGH); ++ addr1_low = tr32(MAC_ADDR_1_LOW); ++ ++ /* Skip MAC addr 1 if ASF is using it. */ ++ if ((addr0_high != addr1_high || addr0_low != addr1_low) && ++ !(addr1_high == 0 && addr1_low == 0)) ++ skip_mac_1 = true; ++ } ++ spin_lock_bh(&tp->lock); ++ __tg3_set_mac_addr(tp, skip_mac_1); ++ __tg3_set_rx_mode(dev); ++ spin_unlock_bh(&tp->lock); ++ ++ return err; ++} ++ ++/* tp->lock is held. */ ++static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, ++ dma_addr_t mapping, u32 maxlen_flags, ++ u32 nic_addr) ++{ ++ tg3_write_mem(tp, ++ (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), ++ ((u64) mapping >> 32)); ++ tg3_write_mem(tp, ++ (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), ++ ((u64) mapping & 0xffffffff)); ++ tg3_write_mem(tp, ++ (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), ++ maxlen_flags); ++ ++ if (!tg3_flag(tp, 5705_PLUS)) ++ tg3_write_mem(tp, ++ (bdinfo_addr + TG3_BDINFO_NIC_ADDR), ++ nic_addr); ++} ++ ++static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec) ++{ ++ int i = 0; ++ ++ if (!tg3_flag(tp, ENABLE_TSS)) { ++ tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); ++ tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); ++ tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); ++ } else { ++ tw32(HOSTCC_TXCOL_TICKS, 0); ++ tw32(HOSTCC_TXMAX_FRAMES, 0); ++ tw32(HOSTCC_TXCOAL_MAXF_INT, 0); ++ ++ for (; i < tp->txq_cnt; i++) { ++ u32 reg; ++ ++ reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; ++ tw32(reg, ec->tx_coalesce_usecs); ++ reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; ++ tw32(reg, ec->tx_max_coalesced_frames); ++ reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; ++ tw32(reg, ec->tx_max_coalesced_frames_irq); ++ } ++ } ++ ++ for (; i < tp->irq_max - 1; i++) { ++ tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); ++ tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); ++ tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); ++ } ++} ++ ++static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec) ++{ ++ int i = 0; ++ u32 limit = tp->rxq_cnt; ++ ++ if (!tg3_flag(tp, ENABLE_RSS)) { ++ tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); ++ tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); ++ tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); ++ limit--; ++ } else { ++ tw32(HOSTCC_RXCOL_TICKS, 0); ++ tw32(HOSTCC_RXMAX_FRAMES, 0); ++ tw32(HOSTCC_RXCOAL_MAXF_INT, 0); ++ } ++ ++ for (; i < limit; i++) { ++ u32 reg; ++ ++ reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; ++ tw32(reg, ec->rx_coalesce_usecs); ++ reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; ++ tw32(reg, ec->rx_max_coalesced_frames); ++ reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; ++ tw32(reg, ec->rx_max_coalesced_frames_irq); ++ } ++ ++ for (; i < tp->irq_max - 1; i++) { ++ tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 
0x18, 0); ++ tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); ++ tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); ++ } ++} ++ ++static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) ++{ ++ tg3_coal_tx_init(tp, ec); ++ tg3_coal_rx_init(tp, ec); ++ ++ if (!tg3_flag(tp, 5705_PLUS)) { ++ u32 val = ec->stats_block_coalesce_usecs; ++ ++ tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); ++ tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); ++ ++ if (!tp->link_up) ++ val = 0; ++ ++ tw32(HOSTCC_STAT_COAL_TICKS, val); ++ } ++} ++ ++/* tp->lock is held. */ ++static void tg3_tx_rcbs_disable(struct tg3 *tp) ++{ ++ u32 txrcb, limit; ++ ++ /* Disable all transmit rings but the first. */ ++ if (!tg3_flag(tp, 5705_PLUS)) ++ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; ++ else if (tg3_flag(tp, 5717_PLUS)) ++ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; ++ else if (tg3_flag(tp, 57765_CLASS) || ++ tg3_asic_rev(tp) == ASIC_REV_5762) ++ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; ++ else ++ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; ++ ++ for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; ++ txrcb < limit; txrcb += TG3_BDINFO_SIZE) ++ tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, ++ BDINFO_FLAGS_DISABLED); ++} ++ ++/* tp->lock is held. */ ++static void tg3_tx_rcbs_init(struct tg3 *tp) ++{ ++ int i = 0; ++ u32 txrcb = NIC_SRAM_SEND_RCB; ++ ++ if (tg3_flag(tp, ENABLE_TSS)) ++ i++; ++ ++ for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { ++ struct tg3_napi *tnapi = &tp->napi[i]; ++ ++ if (!tnapi->tx_ring) ++ continue; ++ ++ tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, ++ (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), ++ NIC_SRAM_TX_BUFFER_DESC); ++ } ++} ++ ++/* tp->lock is held. */ ++static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) ++{ ++ u32 rxrcb, limit; ++ ++ /* Disable all receive return rings but the first. */ ++ if (tg3_flag(tp, 5717_PLUS)) ++ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; ++ else if (!tg3_flag(tp, 5705_PLUS)) ++ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; ++ else if (tg3_asic_rev(tp) == ASIC_REV_5755 || ++ tg3_asic_rev(tp) == ASIC_REV_5762 || ++ tg3_flag(tp, 57765_CLASS)) ++ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; ++ else ++ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; ++ ++ for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; ++ rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) ++ tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, ++ BDINFO_FLAGS_DISABLED); ++} ++ ++/* tp->lock is held. */ ++static void tg3_rx_ret_rcbs_init(struct tg3 *tp) ++{ ++ int i = 0; ++ u32 rxrcb = NIC_SRAM_RCV_RET_RCB; ++ ++ if (tg3_flag(tp, ENABLE_RSS)) ++ i++; ++ ++ for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { ++ struct tg3_napi *tnapi = &tp->napi[i]; ++ ++ if (!tnapi->rx_rcb) ++ continue; ++ ++ tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, ++ (tp->rx_ret_ring_mask + 1) << ++ BDINFO_FLAGS_MAXLEN_SHIFT, 0); ++ } ++} ++ ++/* tp->lock is held. */ ++static void tg3_rings_reset(struct tg3 *tp) ++{ ++ int i; ++ u32 stblk; ++ struct tg3_napi *tnapi = &tp->napi[0]; ++ ++ tg3_tx_rcbs_disable(tp); ++ ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ for (i = 1; i < TG3_IRQ_MAX_VECS_IOV; i++) ++ tg3_disable_prod_rcbs(tp, i); ++#endif ++ ++ tg3_rx_ret_rcbs_disable(tp); ++ ++ /* Disable interrupts */ ++ tw32_mailbox_f(tp->napi[0].int_mbox, 1); ++ tp->napi[0].chk_msi_cnt = 0; ++ tp->napi[0].last_rx_cons = 0; ++ tp->napi[0].last_tx_cons = 0; ++ ++ /* Zero mailbox registers. 
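++	 * No stale producer or consumer index may survive the reset, so
++	 * every per-vector interrupt, consumer and producer mailbox is
++	 * cleared.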
++	 */
++	if (tg3_flag(tp, SUPPORT_MSIX)) {
++		for (i = 1; i < tp->irq_max; i++) {
++			tp->napi[i].tx_prod = 0;
++			tp->napi[i].tx_cons = 0;
++			if (tg3_flag(tp, ENABLE_TSS))
++				tw32_mailbox(tp->napi[i].prodmbox, 0);
++			tw32_rx_mbox(tp->napi[i].consmbox, 0);
++			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
++			tp->napi[i].chk_msi_cnt = 0;
++			tp->napi[i].last_rx_cons = 0;
++			tp->napi[i].last_tx_cons = 0;
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++			if (!tg3_flag(tp, ENABLE_RSS)) {
++				struct tg3_rx_prodring_set *tpr;
++
++				tpr = &tp->napi[i].prodring;
++				tw32_rx_mbox(tpr->rx_jmb_mbox, 0);
++				tw32_rx_mbox(tpr->rx_std_mbox, 0);
++			}
++#endif
++		}
++		if (!tg3_flag(tp, ENABLE_TSS))
++			tw32_mailbox(tp->napi[0].prodmbox, 0);
++	} else {
++		tp->napi[0].tx_prod = 0;
++		tp->napi[0].tx_cons = 0;
++		tw32_mailbox(tp->napi[0].prodmbox, 0);
++		tw32_rx_mbox(tp->napi[0].consmbox, 0);
++	}
++
++	/* Make sure the NIC-based send BD rings are disabled. */
++	if (!tg3_flag(tp, 5705_PLUS)) {
++		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
++		for (i = 0; i < 16; i++)
++			tw32_tx_mbox(mbox + i * 8, 0);
++	}
++
++	/* Clear status block in RAM. */
++	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
++
++	/* Set status block DMA address */
++	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
++	     ((u64) tnapi->status_mapping >> 32));
++	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
++	     ((u64) tnapi->status_mapping & 0xffffffff));
++
++	stblk = HOSTCC_STATBLCK_RING1;
++
++	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
++		u64 mapping = (u64)tnapi->status_mapping;
++		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
++		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
++		stblk += 8;
++
++		/* Clear status block in RAM. */
++		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
++	}
++
++	tg3_tx_rcbs_init(tp);
++	tg3_rx_ret_rcbs_init(tp);
++}
++
++static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
++{
++	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
++
++	if (!tg3_flag(tp, 5750_PLUS) ||
++	    tg3_flag(tp, 5780_CLASS) ||
++	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
++	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
++	    tg3_flag(tp, 57765_PLUS))
++		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
++	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
++		 tg3_asic_rev(tp) == ASIC_REV_5787)
++		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
++	else
++		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	/* In IOV mode, the std rx BD cache is chopped into 17 pieces. */
++	if (tg3_flag(tp, ENABLE_IOV))
++		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
++#endif /* TG3_VMWARE_NETQ_ENABLE */
++
++	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
++	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
++
++	val = min(nic_rep_thresh, host_rep_thresh);
++	tw32(RCVBDI_STD_THRESH, val);
++
++	if (tg3_flag(tp, 57765_PLUS))
++		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	if (tg3_flag(tp, 5717_PLUS) && tg3_flag(tp, ENABLE_IOV))
++		tw32(STD_REPLENISH_LWM, bdcache_maxcnt / 2);
++#endif /* TG3_VMWARE_NETQ_ENABLE */
++
++	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
++		return;
++
++	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	/* In IOV mode, the jmb rx BD cache is chopped into 17 pieces.
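++	 * A correspondingly smaller cache size and replenish low-water mark
++	 * are programmed below.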
*/ ++ if (tg3_flag(tp, ENABLE_IOV)) ++ bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717; ++#endif /* TG3_VMWARE_NETQ_ENABLE */ ++ ++ host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); ++ ++ val = min(bdcache_maxcnt / 2, host_rep_thresh); ++ tw32(RCVBDI_JUMBO_THRESH, val); ++ ++ if (tg3_flag(tp, 57765_PLUS)) ++ tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); ++ ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ if (tg3_flag(tp, 5717_PLUS) && tg3_flag(tp, ENABLE_IOV)) ++ tw32(JMB_REPLENISH_LWM, bdcache_maxcnt / 2); ++#endif /* TG3_VMWARE_NETQ_ENABLE */ ++} ++ ++static inline u32 calc_crc(unsigned char *buf, int len) ++{ ++ u32 reg; ++ u32 tmp; ++ int j, k; ++ ++ reg = 0xffffffff; ++ ++ for (j = 0; j < len; j++) { ++ reg ^= buf[j]; ++ ++ for (k = 0; k < 8; k++) { ++ tmp = reg & 0x01; ++ ++ reg >>= 1; ++ ++ if (tmp) ++ reg ^= 0xedb88320; ++ } ++ } ++ ++ return ~reg; ++} ++ ++static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) ++{ ++ /* accept or reject all multicast frames */ ++ tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); ++ tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); ++ tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); ++ tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); ++} ++ ++static void __tg3_set_rx_mode(struct net_device *dev) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ u32 rx_mode; ++ ++ rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | ++ RX_MODE_KEEP_VLAN_TAG); ++ ++ /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG ++ * flag clear. ++ */ ++#ifndef BCM_HAS_NEW_VLAN_INTERFACE ++ if (!tp->vlgrp) ++#endif ++ if (!tg3_flag(tp, ENABLE_ASF)) ++ rx_mode |= RX_MODE_KEEP_VLAN_TAG; ++ ++ if (dev->flags & IFF_PROMISC) { ++ /* Promiscuous mode. */ ++ rx_mode |= RX_MODE_PROMISC; ++ } else if (dev->flags & IFF_ALLMULTI) { ++ /* Accept all multicast. */ ++ tg3_set_multi(tp, 1); ++ } else if (netdev_mc_empty(dev)) { ++ /* Reject all multicast. */ ++ tg3_set_multi(tp, 0); ++ } else { ++ /* Accept one or more multicast(s). 
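++		 * Each address is hashed with the standard Ethernet CRC-32
++		 * (see calc_crc() above); the low 7 bits of the inverted CRC
++		 * select one of 128 filter bits spread across
++		 * MAC_HASH_REG_0..3.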
++		 */
++		struct netdev_hw_addr *ha;
++		u32 mc_filter[4] = { 0, };
++		u32 regidx;
++		u32 bit;
++		u32 crc;
++
++		netdev_for_each_mc_addr(ha, dev) {
++			crc = calc_crc(ha->addr, ETH_ALEN);
++			bit = ~crc & 0x7f;
++			regidx = (bit & 0x60) >> 5;
++			bit &= 0x1f;
++			mc_filter[regidx] |= (1 << bit);
++		}
++
++		tw32(MAC_HASH_REG_0, mc_filter[0]);
++		tw32(MAC_HASH_REG_1, mc_filter[1]);
++		tw32(MAC_HASH_REG_2, mc_filter[2]);
++		tw32(MAC_HASH_REG_3, mc_filter[3]);
++	}
++
++#ifdef IFF_UNICAST_FLT
++	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
++		rx_mode |= RX_MODE_PROMISC;
++	} else if (!(dev->flags & IFF_PROMISC)) {
++		/* Add all entries into the mac addr filter list */
++		int i = 0;
++		struct netdev_hw_addr *ha;
++
++		netdev_for_each_uc_addr(ha, dev) {
++			__tg3_set_one_mac_addr(tp, ha->addr,
++					       i + TG3_UCAST_ADDR_IDX(tp));
++			i++;
++		}
++	}
++#endif
++
++	if (rx_mode != tp->rx_mode) {
++		tp->rx_mode = rx_mode;
++		tw32_f(MAC_RX_MODE, rx_mode);
++		udelay(10);
++	}
++}
++
++static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
++{
++	int i;
++
++	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
++		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
++}
++
++static void tg3_rss_check_indir_tbl(struct tg3 *tp)
++{
++	int i;
++
++	if (!tg3_flag(tp, ENABLE_RSS))
++		return;
++
++	if (tp->rxq_cnt == 1) {
++		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
++		return;
++	}
++
++	/* Validate table against current IRQ count */
++	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
++		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
++			break;
++	}
++
++	if (i != TG3_RSS_INDIR_TBL_SIZE)
++		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
++}
++
++static void tg3_rss_write_indir_tbl(struct tg3 *tp)
++{
++	int i = 0;
++	u32 reg = MAC_RSS_INDIR_TBL_0;
++
++	while (i < TG3_RSS_INDIR_TBL_SIZE) {
++		u32 val = tp->rss_ind_tbl[i];
++		i++;
++		for (; i % 8; i++) {
++			val <<= 4;
++			val |= tp->rss_ind_tbl[i];
++		}
++		tw32(reg, val);
++		reg += 4;
++	}
++}
++
++static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
++{
++	if (tg3_asic_rev(tp) == ASIC_REV_5719)
++		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
++	else
++		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
++}
++
++/* tp->lock is held.
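++ * tg3_reset_hw() fully resets the chip and then reprograms every
++ * functional block from scratch; reset_phy additionally requests a PHY
++ * reset.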
*/ ++static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) ++{ ++ u32 val, rdmac_mode; ++ int i, err, limit; ++ struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; ++ ++ tg3_disable_ints(tp); ++ ++ tg3_stop_fw(tp); ++ ++ tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); ++ ++ if (tg3_flag(tp, INIT_COMPLETE)) ++ tg3_abort_hw(tp, 1); ++ ++ if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && ++ !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { ++ tg3_phy_pull_config(tp); ++ ++ /* Pull eee config only if not overridden by module param */ ++ if (tg3_disable_eee == -1) ++ tg3_eee_pull_config(tp, NULL); ++ ++ tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; ++ } ++ ++ /* Enable MAC control of LPI */ ++#ifndef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_asic_rev(tp) != ASIC_REV_5785) ++#endif ++ if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) ++ tg3_setup_eee(tp); ++ ++ if (reset_phy) ++ tg3_phy_reset(tp); ++ ++ err = tg3_chip_reset(tp); ++ if (err) ++ return err; ++ ++ tg3_write_sig_legacy(tp, RESET_KIND_INIT); ++ ++ if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { ++ val = tr32(TG3_CPMU_CTRL); ++ val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); ++ tw32(TG3_CPMU_CTRL, val); ++ ++ val = tr32(TG3_CPMU_LSPD_10MB_CLK); ++ val &= ~CPMU_LSPD_10MB_MACCLK_MASK; ++ val |= CPMU_LSPD_10MB_MACCLK_6_25; ++ tw32(TG3_CPMU_LSPD_10MB_CLK, val); ++ ++ val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); ++ val &= ~CPMU_LNK_AWARE_MACCLK_MASK; ++ val |= CPMU_LNK_AWARE_MACCLK_6_25; ++ tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); ++ ++ val = tr32(TG3_CPMU_HST_ACC); ++ val &= ~CPMU_HST_ACC_MACCLK_MASK; ++ val |= CPMU_HST_ACC_MACCLK_6_25; ++ tw32(TG3_CPMU_HST_ACC, val); ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_57780) { ++ val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; ++ val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | ++ PCIE_PWR_MGMT_L1_THRESH_4MS; ++ tw32(PCIE_PWR_MGMT_THRESH, val); ++ ++ val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; ++ tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); ++ ++ tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); ++ ++ val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; ++ tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); ++ } ++ ++ if (tg3_flag(tp, L1PLLPD_EN)) { ++ u32 grc_mode = tr32(GRC_MODE); ++ ++ /* Access the lower 1K of PL PCIE block registers. */ ++ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; ++ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); ++ ++ val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); ++ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, ++ val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); ++ ++ tw32(GRC_MODE, grc_mode); ++ } ++ ++ if (tg3_flag(tp, 57765_CLASS)) { ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) { ++ u32 grc_mode = tr32(GRC_MODE); ++ ++ /* Access the lower 1K of PL PCIE block registers. */ ++ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; ++ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); ++ ++ val = tr32(TG3_PCIE_TLDLPL_PORT + ++ TG3_PCIE_PL_LO_PHYCTL5); ++ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, ++ val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); ++ ++ tw32(GRC_MODE, grc_mode); ++ } ++ ++ if (tg3_chip_rev(tp) != CHIPREV_57765_AX) { ++ u32 grc_mode; ++ ++ /* Fix transmit hangs */ ++ val = tr32(TG3_CPMU_PADRNG_CTL); ++ val |= TG3_CPMU_PADRNG_CTL_RDIV2; ++ tw32(TG3_CPMU_PADRNG_CTL, val); ++ ++ grc_mode = tr32(GRC_MODE); ++ ++ /* Access the lower 1K of DL PCIE block registers. 
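++			 * (selected through the GRC_MODE_PCIE_DL_SEL window,
++			 * mirroring the PL-block accesses above)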
++			 */
++			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
++			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
++
++			val = tr32(TG3_PCIE_TLDLPL_PORT +
++				   TG3_PCIE_DL_LO_FTSMAX);
++			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
++			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
++			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
++
++			tw32(GRC_MODE, grc_mode);
++		}
++
++		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
++		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
++		val |= CPMU_LSPD_10MB_MACCLK_6_25;
++		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
++	}
++
++	/* This works around an issue with Athlon chipsets on
++	 * B3 tigon3 silicon. This bit has no effect on any
++	 * other revision. But do not set this on PCI Express
++	 * chips and don't even touch the clocks if the CPMU is present.
++	 */
++	if (!tg3_flag(tp, CPMU_PRESENT)) {
++		if (!tg3_flag(tp, PCI_EXPRESS))
++			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
++		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
++	}
++
++	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
++	    tg3_flag(tp, PCIX_MODE)) {
++		val = tr32(TG3PCI_PCISTATE);
++		val |= PCISTATE_RETRY_SAME_DMA;
++		tw32(TG3PCI_PCISTATE, val);
++	}
++
++	if (tg3_flag(tp, ENABLE_APE)) {
++		/* Allow reads and writes to the
++		 * APE register and memory space.
++		 */
++		val = tr32(TG3PCI_PCISTATE);
++		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
++		       PCISTATE_ALLOW_APE_SHMEM_WR |
++		       PCISTATE_ALLOW_APE_PSPACE_WR;
++		tw32(TG3PCI_PCISTATE, val);
++	}
++
++	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
++		/* Enable some hw fixes. */
++		val = tr32(TG3PCI_MSI_DATA);
++		val |= (1 << 26) | (1 << 28) | (1 << 29);
++		tw32(TG3PCI_MSI_DATA, val);
++	}
++
++	/* Descriptor ring init may make accesses to the
++	 * NIC SRAM area to set up the TX descriptors, so we
++	 * can only do this after the hardware has been
++	 * successfully reset.
++	 */
++	err = tg3_init_rings(tp);
++	if (err)
++		return err;
++
++	if (tg3_flag(tp, 57765_PLUS)) {
++		val = tr32(TG3PCI_DMA_RW_CTRL) &
++		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
++		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
++			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
++		if (!tg3_flag(tp, 57765_CLASS) &&
++		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
++		    tg3_asic_rev(tp) != ASIC_REV_5762)
++			val |= DMA_RWCTRL_TAGGED_STAT_WA;
++		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
++	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
++		   tg3_asic_rev(tp) != ASIC_REV_5761) {
++		/* This value is determined during the probe time DMA
++		 * engine test, tg3_test_dma.
++		 */
++		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
++	}
++
++	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
++			  GRC_MODE_4X_NIC_SEND_RINGS |
++			  GRC_MODE_NO_TX_PHDR_CSUM |
++			  GRC_MODE_NO_RX_PHDR_CSUM);
++	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
++
++	/* Pseudo-header checksum is done by hardware logic and not
++	 * the offload processors, so make the chip do the pseudo-
++	 * header checksums on receive. For transmit it is more
++	 * convenient to do the pseudo-header checksum in software
++	 * as Linux does that on transmit for us in all cases.
++	 */
++	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
++
++	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
++	if (tp->rxptpctl)
++		tw32(TG3_RX_PTP_CTL,
++		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
++
++	if (tg3_flag(tp, PTP_CAPABLE))
++		val |= GRC_MODE_TIME_SYNC_ENABLE;
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	if (tg3_flag(tp, ENABLE_IOV))
++		val |= GRC_MODE_IOV_ENABLE;
++#endif
++
++	tw32(GRC_MODE, tp->grc_mode | val);
++
++	/* Set up the timer prescaler register. Clock is always 66 MHz.
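++	 * A prescaler value of 65 divides the 66 MHz core clock by 66
++	 * (65 + 1), yielding the 1 MHz / 1 usec tick that the coalescing
++	 * timers are specified in.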
*/ ++ val = tr32(GRC_MISC_CFG); ++ val &= ~0xff; ++ val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT); ++ tw32(GRC_MISC_CFG, val); ++ ++ /* Initialize MBUF/DESC pool. */ ++ if (tg3_flag(tp, 5750_PLUS)) { ++ /* Do nothing. */ ++ } else if (tg3_asic_rev(tp) != ASIC_REV_5705) { ++ tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); ++ if (tg3_asic_rev(tp) == ASIC_REV_5704) ++ tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); ++ else ++ tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); ++ tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); ++ tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); ++ } else if (tg3_flag(tp, TSO_CAPABLE)) { ++#if TG3_TSO_SUPPORT != 0 ++ int fw_len; ++ ++ fw_len = (TG3_TSO5_FW_TEXT_LEN + ++ TG3_TSO5_FW_RODATA_LEN + ++ TG3_TSO5_FW_DATA_LEN + ++ TG3_TSO5_FW_SBSS_LEN + ++ TG3_TSO5_FW_BSS_LEN); ++ fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); ++ tw32(BUFMGR_MB_POOL_ADDR, ++ NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); ++ tw32(BUFMGR_MB_POOL_SIZE, ++ NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); ++#endif ++ } ++ ++ if (tp->dev->mtu <= ETH_DATA_LEN) { ++ tw32(BUFMGR_MB_RDMA_LOW_WATER, ++ tp->bufmgr_config.mbuf_read_dma_low_water); ++ tw32(BUFMGR_MB_MACRX_LOW_WATER, ++ tp->bufmgr_config.mbuf_mac_rx_low_water); ++ tw32(BUFMGR_MB_HIGH_WATER, ++ tp->bufmgr_config.mbuf_high_water); ++ } else { ++ tw32(BUFMGR_MB_RDMA_LOW_WATER, ++ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); ++ tw32(BUFMGR_MB_MACRX_LOW_WATER, ++ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); ++ tw32(BUFMGR_MB_HIGH_WATER, ++ tp->bufmgr_config.mbuf_high_water_jumbo); ++ } ++ tw32(BUFMGR_DMA_LOW_WATER, ++ tp->bufmgr_config.dma_low_water); ++ tw32(BUFMGR_DMA_HIGH_WATER, ++ tp->bufmgr_config.dma_high_water); ++ ++ val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; ++ if (tg3_asic_rev(tp) == ASIC_REV_5719) ++ val |= BUFMGR_MODE_NO_TX_UNDERRUN; ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 || ++ tg3_asic_rev(tp) == ASIC_REV_5762 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) ++ val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; ++ tw32(BUFMGR_MODE, val); ++ for (i = 0; i < 2000; i++) { ++ if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) ++ break; ++ udelay(10); ++ } ++ if (i >= 2000) { ++ netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__); ++ return -ENODEV; ++ } ++ ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1) ++ tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); ++ ++ tg3_setup_rxbd_thresholds(tp); ++ ++ /* Initialize TG3_BDINFO's at: ++ * RCVDBDI_STD_BD: standard eth size rx ring ++ * RCVDBDI_JUMBO_BD: jumbo frame rx ring ++ * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) ++ * ++ * like so: ++ * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring ++ * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | ++ * ring attribute flags ++ * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM ++ * ++ * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. ++ * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. ++ * ++ * The size of each ring is fixed in the firmware, but the location is ++ * configurable. 
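++	 * For illustration only: with 1536-byte standard buffers, the
++	 * TG3_BDINFO_MAXLEN_FLAGS word would be (1536 << 16) | flags,
++	 * i.e. 0x06000000 plus the attribute flags.  The real value
++	 * programmed below comes from TG3_RX_STD_DMA_SZ and the
++	 * chip-specific flags.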
++ */ ++ tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, ++ ((u64) tpr->rx_std_mapping >> 32)); ++ tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, ++ ((u64) tpr->rx_std_mapping & 0xffffffff)); ++ if (!tg3_flag(tp, 5717_PLUS)) ++ tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, ++ NIC_SRAM_RX_BUFFER_DESC); ++ ++ /* Disable the mini ring */ ++ if (!tg3_flag(tp, 5705_PLUS)) ++ tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, ++ BDINFO_FLAGS_DISABLED); ++ ++ /* Program the jumbo buffer descriptor ring control ++ * blocks on those devices that have them. ++ */ ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || ++ (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { ++ ++ if (tg3_flag(tp, JUMBO_RING_ENABLE)) { ++ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, ++ ((u64) tpr->rx_jmb_mapping >> 32)); ++ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, ++ ((u64) tpr->rx_jmb_mapping & 0xffffffff)); ++ val = TG3_RX_JMB_RING_SIZE(tp) << ++ BDINFO_FLAGS_MAXLEN_SHIFT; ++ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, ++ val | BDINFO_FLAGS_USE_EXT_RECV); ++ if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || ++ tg3_flag(tp, 57765_CLASS) || ++ tg3_asic_rev(tp) == ASIC_REV_5762) ++ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, ++ NIC_SRAM_RX_JUMBO_BUFFER_DESC); ++ } else { ++ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, ++ BDINFO_FLAGS_DISABLED); ++ } ++ ++ if (tg3_flag(tp, 57765_PLUS)) { ++ val = TG3_RX_STD_RING_SIZE(tp); ++ val <<= BDINFO_FLAGS_MAXLEN_SHIFT; ++ val |= (TG3_RX_STD_DMA_SZ << 2); ++ } else ++ val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; ++ } else ++ val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; ++ ++ tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); ++ ++ tpr->rx_std_prod_idx = tp->rx_pending; ++ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); ++ ++ tpr->rx_jmb_prod_idx = ++ tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; ++ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); ++ ++ tg3_rings_reset(tp); ++ ++ /* Initialize MAC address and backoff seed. */ ++ __tg3_set_mac_addr(tp, false); ++ ++ /* MTU + ethernet header + FCS + optional VLAN tag */ ++ tw32(MAC_RX_MTU_SIZE, ++ tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); ++ ++ /* The slot time is changed by tg3_setup_phy if we ++ * run at gigabit with half duplex. ++ */ ++ val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | ++ (6 << TX_LENGTHS_IPG_SHIFT) | ++ (32 << TX_LENGTHS_SLOT_TIME_SHIFT); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5720 || ++ tg3_asic_rev(tp) == ASIC_REV_5762) ++ val |= tr32(MAC_TX_LENGTHS) & ++ (TX_LENGTHS_JMB_FRM_LEN_MSK | ++ TX_LENGTHS_CNT_DWN_VAL_MSK); ++ ++ tw32(MAC_TX_LENGTHS, val); ++ ++ /* Receive rules. */ ++ tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); ++ tw32(RCVLPC_CONFIG, 0x0181); ++ ++ /* Calculate RDMAC_MODE setting early, we need it to determine ++ * the RCVLPC_STATE_ENABLE mask. 
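++	 * (Specifically, whether RDMAC_MODE_FIFO_SIZE_128 ends up set
++	 * here decides which statistics-enable workaround is applied
++	 * further down.)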
++ */ ++ rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | ++ RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | ++ RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | ++ RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | ++ RDMAC_MODE_LNGREAD_ENAB); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5717) ++ rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5784 || ++ tg3_asic_rev(tp) == ASIC_REV_5785 || ++ tg3_asic_rev(tp) == ASIC_REV_57780) ++ rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | ++ RDMAC_MODE_MBUF_RBD_CRPT_ENAB | ++ RDMAC_MODE_MBUF_SBD_CRPT_ENAB; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5705 && ++ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { ++ if (tg3_flag(tp, TSO_CAPABLE) && ++ tg3_asic_rev(tp) == ASIC_REV_5705) { ++ rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; ++ } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && ++ !tg3_flag(tp, IS_5788)) { ++ rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; ++ } ++ } ++ ++ if (tg3_flag(tp, PCI_EXPRESS)) ++ rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_57766) { ++ tp->dma_limit = 0; ++ ++#if defined(__VMKLNX__) ++ if (tg3_flag(tp, TSO_CAPABLE)) ++ tp->dma_limit = TG3_TX_BD_DMA_MAX_32K; ++#endif ++ if (tp->dev->mtu <= ETH_DATA_LEN) ++ rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; ++ } ++ ++ /* Enables IPV4 checksum offload as well. */ ++ if (tg3_flag(tp, HW_TSO_1) || ++ tg3_flag(tp, HW_TSO_2) || ++ tg3_flag(tp, HW_TSO_3)) ++ rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; ++ ++ /* Enables IPV6 checksum offload as well. */ ++ if (tg3_flag(tp, 57765_PLUS) || ++ tg3_asic_rev(tp) == ASIC_REV_5785 || ++ tg3_asic_rev(tp) == ASIC_REV_57780) ++ rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5720 || ++ tg3_asic_rev(tp) == ASIC_REV_5762) ++ rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5761 || ++ tg3_asic_rev(tp) == ASIC_REV_5784 || ++ tg3_asic_rev(tp) == ASIC_REV_5785 || ++ tg3_asic_rev(tp) == ASIC_REV_57780 || ++ tg3_flag(tp, 57765_PLUS)) { ++ u32 tgtreg; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5762) ++ tgtreg = TG3_RDMA_RSRVCTRL_REG2; ++ else ++ tgtreg = TG3_RDMA_RSRVCTRL_REG; ++ ++ val = tr32(tgtreg); ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || ++ tg3_asic_rev(tp) == ASIC_REV_5762) { ++ val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | ++ TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | ++ TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); ++ val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | ++ TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | ++ TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; ++ } ++ tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5719 || ++ tg3_asic_rev(tp) == ASIC_REV_5720 || ++ tg3_asic_rev(tp) == ASIC_REV_5762) { ++ u32 tgtreg; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5762) ++ tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2; ++ else ++ tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL; ++ ++ val = tr32(tgtreg); ++ tw32(tgtreg, val | ++ TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | ++ TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); ++ } ++ ++ /* Receive/send statistics. 
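++	 * Three cases follow: 5750+ parts clear the DACK fix bit,
++	 * TSO-capable parts using the reduced RDMAC FIFO clear the
++	 * long-burst fix bit, and everything else simply enables all
++	 * statistics (0xffffff).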
*/ ++ if (tg3_flag(tp, 5750_PLUS)) { ++ val = tr32(RCVLPC_STATS_ENABLE); ++ val &= ~RCVLPC_STATSENAB_DACK_FIX; ++ tw32(RCVLPC_STATS_ENABLE, val); ++ } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && ++ tg3_flag(tp, TSO_CAPABLE)) { ++ val = tr32(RCVLPC_STATS_ENABLE); ++ val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; ++ tw32(RCVLPC_STATS_ENABLE, val); ++ } else { ++ tw32(RCVLPC_STATS_ENABLE, 0xffffff); ++ } ++ tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); ++ tw32(SNDDATAI_STATSENAB, 0xffffff); ++ tw32(SNDDATAI_STATSCTRL, ++ (SNDDATAI_SCTRL_ENABLE | ++ SNDDATAI_SCTRL_FASTUPD)); ++ ++ /* Setup host coalescing engine. */ ++ tw32(HOSTCC_MODE, 0); ++ for (i = 0; i < 2000; i++) { ++ if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) ++ break; ++ udelay(10); ++ } ++ ++ __tg3_set_coalesce(tp, &tp->coal); ++ ++ if (!tg3_flag(tp, 5705_PLUS)) { ++ /* Status/statistics block address. See tg3_timer, ++ * the tg3_periodic_fetch_stats call there, and ++ * tg3_get_stats to see how this works for 5705/5750 chips. ++ */ ++ tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, ++ ((u64) tp->stats_mapping >> 32)); ++ tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, ++ ((u64) tp->stats_mapping & 0xffffffff)); ++ tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); ++ ++ tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); ++ ++ /* Clear statistics and status block memory areas */ ++ for (i = NIC_SRAM_STATS_BLK; ++ i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; ++ i += sizeof(u32)) { ++ tg3_write_mem(tp, i, 0); ++ udelay(40); ++ } ++ } ++ ++ tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); ++ ++ tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); ++ tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); ++ if (!tg3_flag(tp, 5705_PLUS)) ++ tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); ++ ++ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { ++ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; ++ /* reset to prevent losing 1st rx packet intermittently */ ++ tw32_f(MAC_RX_MODE, RX_MODE_RESET); ++ udelay(10); ++ } ++ ++ tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | ++ MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | ++ MAC_MODE_FHDE_ENABLE; ++ if (tg3_flag(tp, ENABLE_APE)) ++ tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; ++ if (!tg3_flag(tp, 5705_PLUS) && ++ !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && ++ tg3_asic_rev(tp) != ASIC_REV_5700) ++ tp->mac_mode |= MAC_MODE_LINK_POLARITY; ++ tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); ++ udelay(40); ++ ++ /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). ++ * If TG3_FLAG_IS_NIC is zero, we should read the ++ * register to preserve the GPIO settings for LOMs. The GPIOs, ++ * whether used as inputs or outputs, are set by boot code after ++ * reset. 
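++	 * Below, only the bits in gpio_mask are re-read from
++	 * GRC_LOCAL_CTRL (preserving what the boot code programmed);
++	 * the remaining driver-chosen bits are kept as-is.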
++ */ ++ if (!tg3_flag(tp, IS_NIC)) { ++ u32 gpio_mask; ++ ++ gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | ++ GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | ++ GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5752) ++ gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | ++ GRC_LCLCTRL_GPIO_OUTPUT3; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5755) ++ gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; ++ ++ tp->grc_local_ctrl &= ~gpio_mask; ++ tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; ++ ++ /* GPIO1 must be driven high for eeprom write protect */ ++ if (tg3_flag(tp, EEPROM_WRITE_PROT)) ++ tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | ++ GRC_LCLCTRL_GPIO_OUTPUT1); ++ } ++ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); ++ udelay(100); ++ ++ if (tg3_flag(tp, USING_MSIX)) { ++ val = tr32(MSGINT_MODE); ++ val |= MSGINT_MODE_ENABLE; ++ if (tp->irq_cnt > 1) ++ val |= MSGINT_MODE_MULTIVEC_EN; ++ if (!tg3_flag(tp, 1SHOT_MSI)) ++ val |= MSGINT_MODE_ONE_SHOT_DISABLE; ++ tw32(MSGINT_MODE, val); ++ } ++ ++ if (!tg3_flag(tp, 5705_PLUS)) { ++ tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); ++ udelay(40); ++ } ++ ++ val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | ++ WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | ++ WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | ++ WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | ++ WDMAC_MODE_LNGREAD_ENAB); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5705 && ++ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { ++ if (tg3_flag(tp, TSO_CAPABLE) && ++ (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) { ++ /* nothing */ ++ } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && ++ !tg3_flag(tp, IS_5788)) { ++ val |= WDMAC_MODE_RX_ACCEL; ++ } ++ } ++ ++ /* Enable host coalescing bug fix */ ++ if (tg3_flag(tp, 5755_PLUS)) ++ val |= WDMAC_MODE_STATUS_TAG_FIX; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5785) ++ val |= WDMAC_MODE_BURST_ALL_DATA; ++ ++ tw32_f(WDMAC_MODE, val); ++ udelay(40); ++ ++ if (tg3_flag(tp, PCIX_MODE)) { ++ u16 pcix_cmd; ++ ++ pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, ++ &pcix_cmd); ++ if (tg3_asic_rev(tp) == ASIC_REV_5703) { ++ pcix_cmd &= ~PCI_X_CMD_MAX_READ; ++ pcix_cmd |= PCI_X_CMD_READ_2K; ++ } else if (tg3_asic_rev(tp) == ASIC_REV_5704) { ++ pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); ++ pcix_cmd |= PCI_X_CMD_READ_2K; ++ } ++ pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, ++ pcix_cmd); ++ } ++ ++ tw32_f(RDMAC_MODE, rdmac_mode); ++ udelay(40); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5719 || ++ tg3_asic_rev(tp) == ASIC_REV_5720) { ++ for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { ++ if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) ++ break; ++ } ++ if (i < TG3_NUM_RDMA_CHANNELS) { ++ val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); ++ val |= tg3_lso_rd_dma_workaround_bit(tp); ++ tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); ++ tg3_flag_set(tp, 5719_5720_RDMA_BUG); ++ } ++ } ++ ++ tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); ++ if (!tg3_flag(tp, 5705_PLUS)) ++ tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5761) ++ tw32(SNDDATAC_MODE, ++ SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY); ++ else ++ tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); ++ ++ tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ val = RCVBDI_MODE_ENABLE; ++ if (!tg3_flag(tp, ENABLE_IOV)) ++ val |= RCVBDI_MODE_RCB_ATTN_ENAB; ++ tw32(RCVBDI_MODE, val); ++ /* No packet drop if 
there are no RBDs.  The H/W continues to service
++	   RX packets for a particular VMQ until all packets are drained. */
++	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ | (2<<13);
++#else
++	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
++	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
++#endif
++	if (tg3_flag(tp, LRG_PROD_RING_CAP))
++		val |= RCVDBDI_MODE_LRG_RING_SZ;
++	tw32(RCVDBDI_MODE, val);
++	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
++#if TG3_TSO_SUPPORT != 0
++	if (tg3_flag(tp, HW_TSO_1) ||
++	    tg3_flag(tp, HW_TSO_2) ||
++	    tg3_flag(tp, HW_TSO_3))
++		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
++#endif
++	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
++	if (tg3_flag(tp, ENABLE_TSS))
++		val |= SNDBDI_MODE_MULTI_TXQ_EN;
++	tw32(SNDBDI_MODE, val);
++	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
++
++	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
++		err = tg3_load_5701_a0_firmware_fix(tp);
++		if (err)
++			return err;
++	}
++
++	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
++		/* Ignore any errors for the firmware download. If download
++		 * fails, the device will operate with EEE disabled
++		 */
++		tg3_load_57766_firmware(tp);
++	}
++
++#if TG3_TSO_SUPPORT != 0
++	if (tg3_flag(tp, TSO_CAPABLE)) {
++		err = tg3_load_tso_firmware(tp);
++		if (err)
++			return err;
++	}
++#endif
++
++	tp->tx_mode = TX_MODE_ENABLE;
++
++	if (tg3_flag(tp, 5755_PLUS) ||
++	    tg3_asic_rev(tp) == ASIC_REV_5906)
++		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
++
++	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
++	    tg3_asic_rev(tp) == ASIC_REV_5762) {
++		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
++		tp->tx_mode &= ~val;
++		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
++	}
++
++	tw32_f(MAC_TX_MODE, tp->tx_mode);
++	udelay(100);
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	tg3_netq_restore(tp);
++#endif
++
++	if (tg3_flag(tp, ENABLE_RSS)) {
++		tg3_rss_write_indir_tbl(tp);
++
++		/* Setup the "secret" hash key.
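++		 * Note: these ten 32-bit writes make up the 40-byte RSS
++		 * hash key; they are the driver's fixed defaults rather
++		 * than a per-boot random key.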
*/ ++ tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); ++ tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc); ++ tw32(MAC_RSS_HASH_KEY_2, 0x50103a45); ++ tw32(MAC_RSS_HASH_KEY_3, 0x36621985); ++ tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8); ++ tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e); ++ tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556); ++ tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe); ++ tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7); ++ tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481); ++ } ++ ++ tp->rx_mode = RX_MODE_ENABLE; ++ if (tg3_flag(tp, 5755_PLUS)) ++ tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5762) ++ tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; ++ ++ if (tg3_flag(tp, ENABLE_RSS)) ++ tp->rx_mode |= RX_MODE_RSS_ENABLE | ++ RX_MODE_RSS_ITBL_HASH_BITS_7 | ++ RX_MODE_RSS_IPV6_HASH_EN | ++ RX_MODE_RSS_TCP_IPV6_HASH_EN | ++ RX_MODE_RSS_IPV4_HASH_EN | ++ RX_MODE_RSS_TCP_IPV4_HASH_EN; ++ ++ tw32_f(MAC_RX_MODE, tp->rx_mode); ++ udelay(10); ++ ++ tw32(MAC_LED_CTRL, tp->led_ctrl); ++ ++ tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); ++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { ++ tw32_f(MAC_RX_MODE, RX_MODE_RESET); ++ udelay(10); ++ } ++ tw32_f(MAC_RX_MODE, tp->rx_mode); ++ udelay(10); ++ ++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { ++ if ((tg3_asic_rev(tp) == ASIC_REV_5704) && ++ !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { ++ /* Set drive transmission level to 1.2V */ ++ /* only if the signal pre-emphasis bit is not set */ ++ val = tr32(MAC_SERDES_CFG); ++ val &= 0xfffff000; ++ val |= 0x880; ++ tw32(MAC_SERDES_CFG, val); ++ } ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ++ tw32(MAC_SERDES_CFG, 0x616000); ++ } ++ ++ /* Prevent chip from dropping frames when flow control ++ * is enabled. ++ */ ++ if (tg3_flag(tp, 57765_CLASS)) ++ val = 1; ++ else ++ val = 2; ++ tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5704 && ++ (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { ++ /* Use hardware link auto-negotiation */ ++ tg3_flag_set(tp, HW_AUTONEG); ++ } ++ ++ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && ++ tg3_asic_rev(tp) == ASIC_REV_5714) { ++ u32 tmp; ++ ++ tmp = tr32(SERDES_RX_CTRL); ++ tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); ++ tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; ++ tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; ++ tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); ++ } ++ ++ if (!tg3_flag(tp, USE_PHYLIB)) { ++ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ++ tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; ++ ++ err = tg3_setup_phy(tp, false); ++ if (err) ++ return err; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && ++ !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { ++ u32 tmp; ++ ++ /* Clear CRC stats. */ ++ if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { ++ tg3_writephy(tp, MII_TG3_TEST1, ++ tmp | MII_TG3_TEST1_CRC_EN); ++ tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); ++ } ++ } ++ } ++ ++ __tg3_set_rx_mode(tp->dev); ++ ++ /* Initialize receive rules. 
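++	 * Each rule is a rule/value register pair.  The two pairs written
++	 * here are the defaults; the unused pairs are cleared by the
++	 * fall-through switch below, and when ASF is enabled the last
++	 * four rules are left for firmware use (limit -= 4).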
*/ ++ tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); ++ tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); ++ tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); ++ tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); ++ ++ if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) ++ limit = 8; ++ else ++ limit = 16; ++ if (tg3_flag(tp, ENABLE_ASF)) ++ limit -= 4; ++ switch (limit) { ++ case 16: ++ tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); ++ case 15: ++ tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); ++ case 14: ++ tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); ++ case 13: ++ tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); ++ case 12: ++ tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); ++ case 11: ++ tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); ++ case 10: ++ tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); ++ case 9: ++ tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); ++ case 8: ++ tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); ++ case 7: ++ tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); ++ case 6: ++ tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); ++ case 5: ++ tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); ++ case 4: ++ /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ ++ case 3: ++ /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ ++ case 2: ++ case 1: ++ ++ default: ++ break; ++ } ++ ++ if (tg3_flag(tp, ENABLE_APE)) ++ /* Write our heartbeat update interval to APE. */ ++ tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, ++ APE_HOST_HEARTBEAT_INT_5SEC); ++ ++ tg3_write_sig_post_reset(tp, RESET_KIND_INIT); ++ ++ return 0; ++} ++ ++/* Called at device open time to get the chip ready for ++ * packet processing. Invoked with tp->lock held. ++ */ ++static int tg3_init_hw(struct tg3 *tp, bool reset_phy) ++{ ++ /* Chip may have been just powered on. If so, the boot code may still ++ * be running initialization. Wait for it to finish to avoid races in ++ * accessing the hardware. 
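++	 * tg3_poll_fw() below waits for the bootcode to signal
++	 * completion before anything else is touched.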
++ */ ++ tg3_enable_register_access(tp); ++ tg3_poll_fw(tp); ++ ++ tg3_switch_clocks(tp); ++ ++ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); ++ ++ return tg3_reset_hw(tp, reset_phy); ++} ++ ++#if IS_ENABLED(CONFIG_HWMON) && !defined(__VMKLNX__) ++static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) ++{ ++ int i; ++ ++ for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) { ++ u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN; ++ ++ tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len); ++ off += len; ++ ++ if (ocir->signature != TG3_OCIR_SIG_MAGIC || ++ !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE)) ++ memset(ocir, 0, TG3_OCIR_LEN); ++ } ++} ++ ++/* sysfs attributes for hwmon */ ++static ssize_t tg3_show_temp(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ struct net_device *netdev = pci_get_drvdata(pdev); ++ struct tg3 *tp = netdev_priv(netdev); ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ u32 temperature; ++ ++ rtnl_lock(); ++ spin_lock_bh(&tp->lock); ++ tg3_ape_scratchpad_read(tp, &temperature, attr->index, ++ sizeof(temperature)); ++ spin_unlock_bh(&tp->lock); ++ rtnl_unlock(); ++ return sprintf(buf, "%u\n", temperature); ++} ++ ++ ++static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL, ++ TG3_TEMP_SENSOR_OFFSET); ++static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL, ++ TG3_TEMP_CAUTION_OFFSET); ++static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL, ++ TG3_TEMP_MAX_OFFSET); ++ ++static struct attribute *tg3_attributes[] = { ++ &sensor_dev_attr_temp1_input.dev_attr.attr, ++ &sensor_dev_attr_temp1_crit.dev_attr.attr, ++ &sensor_dev_attr_temp1_max.dev_attr.attr, ++ NULL ++}; ++ ++static const struct attribute_group tg3_group = { ++ .attrs = tg3_attributes, ++}; ++ ++#endif ++ ++static void tg3_hwmon_close(struct tg3 *tp) ++{ ++#if IS_ENABLED(CONFIG_HWMON) && !defined(__VMKLNX__) ++ if (tp->hwmon_dev) { ++ hwmon_device_unregister(tp->hwmon_dev); ++ tp->hwmon_dev = NULL; ++ sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group); ++ } ++#endif ++} ++ ++static void tg3_hwmon_open(struct tg3 *tp) ++{ ++#if IS_ENABLED(CONFIG_HWMON) && !defined(__VMKLNX__) ++ int i, err; ++ u32 size = 0; ++ struct pci_dev *pdev = tp->pdev; ++ struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; ++ ++ tg3_sd_scan_scratchpad(tp, ocirs); ++ ++ for (i = 0; i < TG3_SD_NUM_RECS; i++) { ++ if (!ocirs[i].src_data_length) ++ continue; ++ ++ size += ocirs[i].src_hdr_length; ++ size += ocirs[i].src_data_length; ++ } ++ ++ if (!size) ++ return; ++ ++ /* Register hwmon sysfs hooks */ ++ err = sysfs_create_group(&pdev->dev.kobj, &tg3_group); ++ if (err) { ++ dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n"); ++ return; ++ } ++ ++ tp->hwmon_dev = hwmon_device_register(&pdev->dev); ++ if (IS_ERR(tp->hwmon_dev)) { ++ tp->hwmon_dev = NULL; ++ dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); ++ sysfs_remove_group(&pdev->dev.kobj, &tg3_group); ++ } ++#endif ++} ++ ++#define TG3_STAT_ADD32(PSTAT, REG) \ ++do { u32 __val = tr32(REG); \ ++ (PSTAT)->low += __val; \ ++ if ((PSTAT)->low < __val) \ ++ (PSTAT)->high += 1; \ ++} while (0) ++ ++static void tg3_periodic_fetch_stats(struct tg3 *tp) ++{ ++ struct tg3_hw_stats *sp = tp->hw_stats; ++ ++ if (!tp->link_up) ++ return; ++ ++ TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); ++ TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); ++ TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); ++ 
TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); ++ TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); ++ TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); ++ TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); ++ TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); ++ TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); ++ TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); ++ TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); ++ TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); ++ TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); ++ if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && ++ (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + ++ sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) { ++ u32 val; ++ ++ val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); ++ val &= ~tg3_lso_rd_dma_workaround_bit(tp); ++ tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); ++ tg3_flag_clear(tp, 5719_5720_RDMA_BUG); ++ } ++ ++ TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); ++ TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); ++ TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); ++ TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); ++ TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); ++ TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); ++ TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); ++ TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); ++ TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); ++ TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); ++ TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); ++ TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); ++ TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); ++ TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); ++ ++ TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); ++ if (tg3_asic_rev(tp) != ASIC_REV_5717 && ++ tg3_asic_rev(tp) != ASIC_REV_5762 && ++ tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 && ++ tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) { ++ TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); ++ } else { ++ u32 val = tr32(HOSTCC_FLOW_ATTN); ++ val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 
1 : 0; ++ if (val) { ++ tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); ++ sp->rx_discards.low += val; ++ if (sp->rx_discards.low < val) ++ sp->rx_discards.high += 1; ++ } ++ sp->mbuf_lwm_thresh_hit = sp->rx_discards; ++ } ++ TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); ++ ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ tg3_vmware_fetch_stats(tp); ++#endif ++} ++ ++static void tg3_chk_missed_msi(struct tg3 *tp) ++{ ++ u32 i; ++ ++ for (i = 0; i < tp->irq_cnt; i++) { ++ struct tg3_napi *tnapi = &tp->napi[i]; ++ ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ if (!(tnapi->netq.flags & TG3_NETQ_RXQ_ENABLED) && ++ !(tnapi->netq.flags & TG3_NETQ_TXQ_ALLOCATED)) ++ continue; ++#endif ++ ++ if (tg3_has_work(tnapi)) { ++ if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && ++ tnapi->last_tx_cons == tnapi->tx_cons) { ++ if (tnapi->chk_msi_cnt < 1) { ++ tnapi->chk_msi_cnt++; ++ return; ++ } ++#ifdef BCM_HAS_NEW_IRQ_SIG ++ tg3_msi(0, tnapi); ++#else ++ tg3_msi(0, tnapi, 0); ++#endif ++ } ++ } ++ tnapi->chk_msi_cnt = 0; ++ tnapi->last_rx_cons = tnapi->rx_rcb_ptr; ++ tnapi->last_tx_cons = tnapi->tx_cons; ++ } ++} ++ ++static void tg3_timer(unsigned long __opaque) ++{ ++ struct tg3 *tp = (struct tg3 *) __opaque; ++ ++ if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) ++ goto restart_timer; ++ ++ spin_lock(&tp->lock); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 || ++ tg3_flag(tp, 57765_CLASS)) ++ tg3_chk_missed_msi(tp); ++ ++ if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { ++ /* BCM4785: Flush posted writes from GbE to host memory. */ ++ tr32(HOSTCC_MODE); ++ } ++ ++#if defined(__VMKLNX__) ++ tg3_vmware_timer(tp); ++#endif ++ ++ if (!tg3_flag(tp, TAGGED_STATUS)) { ++ /* All of this garbage is because when using non-tagged ++ * IRQ status the mailbox/status_block protocol the chip ++ * uses with the cpu is race prone. ++ */ ++ if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { ++ tw32(GRC_LOCAL_CTRL, ++ tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); ++ } else { ++ tw32(HOSTCC_MODE, tp->coalesce_mode | ++ HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); ++ } ++ ++ if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { ++ spin_unlock(&tp->lock); ++ tg3_reset_task_schedule(tp); ++ goto restart_timer; ++ } ++ } ++ ++ /* This part only runs once per second. 
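++	 * (timer_counter reloads from timer_multiplier, which
++	 * tg3_timer_init() sets to HZ / timer_offset, so this block runs
++	 * on every 10th tick when the timer fires every HZ / 10 jiffies.)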
*/
++	if (!--tp->timer_counter) {
++		if (tg3_flag(tp, 5705_PLUS))
++			tg3_periodic_fetch_stats(tp);
++
++		if (tp->setlpicnt && !--tp->setlpicnt)
++			tg3_phy_eee_enable(tp);
++
++		if (tg3_flag(tp, USE_LINKCHG_REG)) {
++			u32 mac_stat;
++			int phy_event;
++
++			mac_stat = tr32(MAC_STATUS);
++
++			phy_event = 0;
++			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
++				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
++					phy_event = 1;
++			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
++				phy_event = 1;
++
++			if (phy_event)
++				tg3_setup_phy(tp, false);
++		} else if (tg3_flag(tp, POLL_SERDES)) {
++			u32 mac_stat = tr32(MAC_STATUS);
++			int need_setup = 0;
++
++			if (tp->link_up &&
++			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
++				need_setup = 1;
++			}
++			if (!tp->link_up &&
++			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
++					 MAC_STATUS_SIGNAL_DET))) {
++				need_setup = 1;
++			}
++			if (need_setup) {
++				if (!tp->serdes_counter) {
++					tw32_f(MAC_MODE,
++					       (tp->mac_mode &
++						~MAC_MODE_PORT_MODE_MASK));
++					udelay(40);
++					tw32_f(MAC_MODE, tp->mac_mode);
++					udelay(40);
++				}
++				tg3_setup_phy(tp, false);
++			}
++		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
++			   tg3_flag(tp, 5780_CLASS)) {
++			tg3_serdes_parallel_detect(tp);
++		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
++			u32 cpmu = tr32(TG3_CPMU_STATUS);
++			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
++					 TG3_CPMU_STATUS_LINK_MASK);
++
++			if (link_up != tp->link_up)
++				tg3_setup_phy(tp, false);
++		}
++
++		tp->timer_counter = tp->timer_multiplier;
++	}
++
++	/* Heartbeat is only sent once every 2 seconds.
++	 *
++	 * The heartbeat is to tell the ASF firmware that the host
++	 * driver is still alive.  In the event that the OS crashes,
++	 * ASF needs to reset the hardware to free up the FIFO space
++	 * that may be filled with rx packets destined for the host.
++	 * If the FIFO is full, ASF will no longer function properly.
++	 *
++	 * Unintended resets have been reported on real time kernels
++	 * where the timer doesn't run on time.  Netpoll will also have
++	 * the same problem.
++	 *
++	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
++	 * to check the ring condition when the heartbeat is expiring
++	 * before doing the reset.  This will prevent most unintended
++	 * resets.
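++	 * The 2 second cadence comes from asf_multiplier, which
++	 * tg3_timer_init() computes as the per-second tick count times
++	 * TG3_FW_UPDATE_FREQ_SEC (presumably 2).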
++ */ ++ if (!--tp->asf_counter) { ++ if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { ++ tg3_wait_for_event_ack(tp); ++ ++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, ++ FWCMD_NICDRV_ALIVE3); ++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); ++ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, ++ TG3_FW_UPDATE_TIMEOUT_SEC); ++ ++ tg3_generate_fw_event(tp); ++ } ++ tp->asf_counter = tp->asf_multiplier; ++ } ++ ++ /* Update the APE heartbeat every 5 seconds.*/ ++ tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); ++ ++ spin_unlock(&tp->lock); ++ ++restart_timer: ++ tp->timer.expires = jiffies + tp->timer_offset; ++ add_timer(&tp->timer); ++} ++ ++static void __devinit tg3_timer_init(struct tg3 *tp) ++{ ++ if (tg3_flag(tp, TAGGED_STATUS) && ++ tg3_asic_rev(tp) != ASIC_REV_5717 && ++ !tg3_flag(tp, 57765_CLASS)) ++ tp->timer_offset = HZ; ++ else ++ tp->timer_offset = HZ / 10; ++ ++ BUG_ON(tp->timer_offset > HZ); ++ ++ tp->timer_multiplier = (HZ / tp->timer_offset); ++ tp->asf_multiplier = (HZ / tp->timer_offset) * ++ TG3_FW_UPDATE_FREQ_SEC; ++ ++ init_timer(&tp->timer); ++ tp->timer.data = (unsigned long) tp; ++ tp->timer.function = tg3_timer; ++} ++ ++static void tg3_timer_start(struct tg3 *tp) ++{ ++ tp->asf_counter = tp->asf_multiplier; ++ tp->timer_counter = tp->timer_multiplier; ++ ++ tp->timer.expires = jiffies + tp->timer_offset; ++ add_timer(&tp->timer); ++} ++ ++static void tg3_timer_stop(struct tg3 *tp) ++{ ++ del_timer_sync(&tp->timer); ++} ++ ++/* Restart hardware after configuration changes, self-test, etc. ++ * Invoked with tp->lock held. ++ */ ++static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) ++ __releases(tp->lock) ++ __acquires(tp->lock) ++{ ++ int err; ++ ++ err = tg3_init_hw(tp, reset_phy); ++ if (err) { ++ netdev_err(tp->dev, ++ "Failed to re-initialize device, aborting\n"); ++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); ++ tg3_full_unlock(tp); ++ tg3_timer_stop(tp); ++ tp->irq_sync = 0; ++ tg3_napi_enable(tp); ++ dev_close(tp->dev); ++ tg3_full_lock(tp, 0); ++ } ++ return err; ++} ++ ++#ifdef BCM_HAS_NEW_INIT_WORK ++static void tg3_reset_task(struct work_struct *work) ++#else ++static void tg3_reset_task(void *_data) ++#endif ++{ ++#ifdef BCM_HAS_NEW_INIT_WORK ++ struct tg3 *tp = container_of(work, struct tg3, reset_task); ++#else ++ struct tg3 *tp = _data; ++#endif ++ int err; ++ ++ tg3_full_lock(tp, 0); ++ ++ if (!netif_running(tp->dev)) { ++ tg3_flag_clear(tp, RESET_TASK_PENDING); ++ tg3_full_unlock(tp); ++ return; ++ } ++ ++ tg3_full_unlock(tp); ++ ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ /* Prevent any netqueue operations while we are resetting. 
*/ ++ if (tg3_flag(tp, ENABLE_IOV)) ++ rtnl_lock(); ++#endif ++ ++#if !defined(__VMKLNX__) ++ rtnl_lock(); ++ ++ if (tp->unrecoverable_err) { ++ dev_close(tp->dev); ++ netdev_err(tp->dev, "Device moved to closed state due to unrecoverable error\n"); ++ goto out2; ++ } ++#endif ++ ++ tg3_phy_stop(tp); ++ ++ tg3_netif_stop(tp); ++ ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ tg3_netq_invalidate_state(tp); ++#endif ++ ++ tg3_full_lock(tp, 1); ++ ++ if (tg3_flag(tp, TX_RECOVERY_PENDING)) { ++ tp->write32_tx_mbox = tg3_write32_tx_mbox; ++ tp->write32_rx_mbox = tg3_write_flush_reg32; ++ tg3_flag_set(tp, MBOX_WRITE_REORDER); ++ tg3_flag_clear(tp, TX_RECOVERY_PENDING); ++ } ++ ++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); ++ err = tg3_init_hw(tp, true); ++#if defined(__VMKLNX__) ++ if (err) { ++ if (printk_ratelimit()) { ++ printk(KERN_ERR "tg3_init_hw failed in tg3_init_task\n"); ++ } ++ tp->irq_sync = 0; ++ tg3_napi_enable(tp); ++ goto out; ++ } ++#else /* !defined(__VMKLNX__) */ ++ if (err) ++ goto out; ++#endif /* defined(__VMKLNX__) */ ++ ++ tg3_netif_start(tp); ++ ++out: ++ tg3_full_unlock(tp); ++ ++ if (!err) ++ tg3_phy_start(tp); ++ ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ if (tg3_flag(tp, ENABLE_IOV)) ++ rtnl_unlock(); ++#endif ++ ++#if !defined(__VMKLNX__) ++out2: ++ rtnl_unlock(); ++#endif ++ ++ tg3_flag_clear(tp, RESET_TASK_PENDING); ++} ++ ++static int tg3_request_irq(struct tg3 *tp, int irq_num) ++{ ++#ifdef BCM_HAS_NEW_IRQ_SIG ++ irq_handler_t fn; ++#else ++ irqreturn_t (*fn)(int, void *, struct pt_regs *); ++#endif ++ unsigned long flags; ++ char *name; ++ struct tg3_napi *tnapi = &tp->napi[irq_num]; ++ ++ if (tp->irq_cnt == 1) ++ name = tp->dev->name; ++ else { ++ name = &tnapi->irq_lbl[0]; ++ if (tnapi->tx_buffers && tnapi->rx_rcb) ++ snprintf(name, IFNAMSIZ, ++ "%s-txrx-%d", tp->dev->name, irq_num); ++ else if (tnapi->tx_buffers) ++ snprintf(name, IFNAMSIZ, ++ "%s-tx-%d", tp->dev->name, irq_num); ++ else if (tnapi->rx_rcb) ++ snprintf(name, IFNAMSIZ, ++ "%s-rx-%d", tp->dev->name, irq_num); ++ else ++ snprintf(name, IFNAMSIZ, ++ "%s-%d", tp->dev->name, irq_num); ++ name[IFNAMSIZ-1] = 0; ++ } ++ ++ if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { ++ fn = tg3_msi; ++ if (tg3_flag(tp, 1SHOT_MSI)) ++ fn = tg3_msi_1shot; ++ flags = 0; ++ } else { ++ fn = tg3_interrupt; ++ if (tg3_flag(tp, TAGGED_STATUS)) ++ fn = tg3_interrupt_tagged; ++ flags = IRQF_SHARED; ++ } ++ ++ return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); ++} ++ ++static int tg3_test_interrupt(struct tg3 *tp) ++{ ++ struct tg3_napi *tnapi = &tp->napi[0]; ++ struct net_device *dev = tp->dev; ++ int err, i, intr_ok = 0; ++ u32 val; ++ ++ if (!netif_running(dev)) ++ return -ENODEV; ++ ++ tg3_disable_ints(tp); ++ ++ free_irq(tnapi->irq_vec, tnapi); ++ ++ /* ++ * Turn off MSI one shot mode. Otherwise this test has no ++ * observable way to know whether the interrupt was delivered. 
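++	 * With one-shot mode off, the interrupt mailbox is expected to
++	 * hold a nonzero value after delivery, which is what the
++	 * polling loop below checks for.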
++ */ ++ if (tg3_flag(tp, 57765_PLUS)) { ++ val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; ++ tw32(MSGINT_MODE, val); ++ } ++ ++ err = request_irq(tnapi->irq_vec, tg3_test_isr, ++ IRQF_SHARED, dev->name, tnapi); ++ if (err) ++ return err; ++ ++ tnapi->hw_status->status &= ~SD_STATUS_UPDATED; ++ tg3_enable_ints(tp); ++ ++ tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | ++ tnapi->coal_now); ++ ++ for (i = 0; i < 5; i++) { ++ u32 int_mbox, misc_host_ctrl; ++ ++ int_mbox = tr32_mailbox(tnapi->int_mbox); ++ misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); ++ ++ if ((int_mbox != 0) || ++ (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { ++ intr_ok = 1; ++ break; ++ } ++ ++ if (tg3_flag(tp, 57765_PLUS) && ++ tnapi->hw_status->status_tag != tnapi->last_tag) ++ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); ++ ++ msleep(10); ++ } ++ ++ tg3_disable_ints(tp); ++ ++ free_irq(tnapi->irq_vec, tnapi); ++ ++ err = tg3_request_irq(tp, 0); ++ ++ if (err) ++ return err; ++ ++ if (intr_ok) { ++ /* Reenable MSI one shot mode. */ ++ if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { ++ val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; ++ tw32(MSGINT_MODE, val); ++ } ++ return 0; ++ } ++ ++ return -EIO; ++} ++ ++#ifdef CONFIG_PCI_MSI ++/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is ++ * successfully restored ++ */ ++static int tg3_test_msi(struct tg3 *tp) ++{ ++ int err; ++ u16 pci_cmd; ++ ++ if (!tg3_flag(tp, USING_MSI)) ++ return 0; ++ ++ /* Turn off SERR reporting in case MSI terminates with Master ++ * Abort. ++ */ ++ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); ++ pci_write_config_word(tp->pdev, PCI_COMMAND, ++ pci_cmd & ~PCI_COMMAND_SERR); ++ ++ err = tg3_test_interrupt(tp); ++ ++ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); ++ ++ if (!err) ++ return 0; ++ ++ /* other failures */ ++ if (err != -EIO) ++ return err; ++ ++ /* MSI test failed, go back to INTx mode */ ++ netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " ++ "to INTx mode. Please report this failure to the PCI " ++ "maintainer and include system chipset information\n"); ++ ++ free_irq(tp->napi[0].irq_vec, &tp->napi[0]); ++ ++ pci_disable_msi(tp->pdev); ++ ++ tg3_flag_clear(tp, USING_MSI); ++ tp->napi[0].irq_vec = tp->pdev->irq; ++ ++ err = tg3_request_irq(tp, 0); ++ if (err) ++ return err; ++ ++ /* Need to reset the chip because the MSI cycle may have terminated ++ * with Master Abort. 
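++	 * (The same concern is why SERR reporting was masked at the
++	 * top of this function.)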
++	 */
++	tg3_full_lock(tp, 1);
++
++	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++	err = tg3_init_hw(tp, true);
++
++	tg3_full_unlock(tp);
++
++	if (err)
++		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
++
++	return err;
++}
++#endif /* CONFIG_PCI_MSI */
++
++static int tg3_request_firmware(struct tg3 *tp)
++{
++	const struct tg3_firmware_hdr *fw_hdr;
++
++	if (tg3_priv_request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
++		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
++			   tp->fw_needed);
++		return -ENOENT;
++	}
++
++	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
++
++	/* Firmware blob starts with version numbers, followed by
++	 * start address and _full_ length including BSS sections
++	 * (which must be longer than the actual data, of course).
++	 */
++
++	tp->fw_len = fw_hdr->len;	/* includes bss */
++	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
++		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
++			   tp->fw_len, tp->fw_needed);
++		tg3_priv_release_firmware(tp->fw);
++		tp->fw = NULL;
++		return -EINVAL;
++	}
++
++	/* We no longer need firmware; we have it. */
++	tp->fw_needed = NULL;
++	return 0;
++}
++
++#if defined(CONFIG_PCI_MSI)
++static bool tg3_ints_alloc_vectors(struct tg3 *tp)
++{
++	int i, rc;
++	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
++
++	for (i = 0; i < tp->irq_max; i++) {
++		msix_ent[i].entry = i;
++		msix_ent[i].vector = 0;
++	}
++
++	rc = tp->irq_cnt;
++	while (1) {
++		int ret;
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++		if (!tg3_flag(tp, IOV_CAPABLE))
++#endif
++		/* If the kernel says that only two MSI-X
++		 * vectors are available, fall back to a simpler
++		 * single queue, single vector MSI-X mode.
++		 */
++		if (rc == 2)
++			rc--;
++
++		ret = pci_enable_msix(tp->pdev, msix_ent, rc);
++		if (ret < 0)
++			return false;
++		else if (ret == 0)
++			break;
++		rc = ret;
++	}
++	tp->irq_cnt = rc;
++
++	for (i = 0; i < tp->irq_max; i++)
++		tp->napi[i].irq_vec = msix_ent[i].vector;
++
++	return true;
++}
++
++static inline u32 tg3_irq_count(struct tg3 *tp)
++{
++	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
++#if defined(TG3_INBOX)
++	return TG3_IRQ_MAX_VECS;
++#endif
++	if (irq_cnt > 1) {
++		/* We want as many rx rings enabled as there are cpus.
++		 * In multiqueue MSI-X mode, the first MSI-X vector
++		 * only deals with link interrupts, etc, so we add
++		 * one to the number of vectors we are requesting.
++		 */
++		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
++	}
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	if (tg3_flag(tp, IOV_CAPABLE))
++		irq_cnt = tg3_netq_tune_vector_count(tp);
++#endif
++
++	return irq_cnt;
++}
++
++static bool tg3_enable_msix(struct tg3 *tp)
++{
++	u32 cpus, irq_cnt;
++
++	cpus = num_online_cpus();
++
++	tp->txq_cnt = tp->txq_req;
++	tp->rxq_cnt = tp->rxq_req;
++
++	/* Disable multiple TX rings by default.  Simple round-robin hardware
++	 * scheduling of the TX rings can cause starvation of rings with
++	 * small packets when other rings have TSO or jumbo packets.
++	 */
++	if (!tp->txq_cnt)
++		tp->txq_cnt = 1;
++	if (!tp->rxq_cnt)
++		tp->rxq_cnt = min(cpus, tp->rxq_max);
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	tg3_netq_limit_dflt_queue_counts(tp);
++#endif
++
++	irq_cnt = tg3_irq_count(tp);
++
++	tp->irq_cnt = irq_cnt;
++	while (tp->irq_cnt) {
++		u32 rxq_cnt, new_irq_cnt;
++
++		if (!tg3_ints_alloc_vectors(tp))
++			return false;
++
++		/* If the number of interrupts is less than our desired queue
++		 * count, adjust the queue count downwards to match.
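++		 * One vector is reserved for link interrupts in multiqueue
++		 * mode (see tg3_irq_count() above), hence rxq_cnt is
++		 * decremented below when more than one vector was granted.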
++ */ ++ rxq_cnt = tp->irq_cnt; ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ if (!tg3_flag(tp, IOV_CAPABLE)) ++#endif ++ if (tp->irq_cnt > 1) ++ rxq_cnt--; ++ ++ rxq_cnt = min(rxq_cnt, tp->rxq_cnt); ++ tp->rxq_cnt = rxq_cnt; ++ ++#ifdef BCM_HAS_STRUCT_NETDEV_QUEUE ++ while (rxq_cnt) { ++ if (netif_set_real_num_rx_queues(tp->dev, rxq_cnt)) ++ rxq_cnt--; ++ else ++ break; ++ } ++ ++ if (!rxq_cnt) { ++ pci_disable_msix(tp->pdev); ++ return false; ++ } ++#endif /* BCM_HAS_STRUCT_NETDEV_QUEUE */ ++ ++ if (tp->rxq_cnt == rxq_cnt) ++ break; ++ ++ tp->rxq_cnt = rxq_cnt; ++ ++ /* See if we can free up any unused MSI-X vectors. */ ++ new_irq_cnt = tg3_irq_count(tp); ++ ++ /* If the IRQ count is the same, we need ++ * the extra interrupts for the tx side. ++ */ ++ if (irq_cnt == new_irq_cnt) ++ break; ++ ++ /* Free unused interrupts and reallocate the exact amount. */ ++ pci_disable_msix(tp->pdev); ++ tp->irq_cnt = new_irq_cnt; ++ } ++ ++ if (irq_cnt != tp->irq_cnt) ++ netdev_notice(tp->dev, ++ "Requested %d MSI-X vectors, received %d\n", ++ irq_cnt, tp->irq_cnt); ++ ++ if (tp->irq_cnt == 1) ++ return true; ++ ++ /* If more than one interrupt vector is allocated, we _need_ to enable ++ * either IOV mode or RSS mode, even if only one rx queue is desired. ++ * If we don't, TSS will not work. ++ */ ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ if (tg3_flag(tp, IOV_CAPABLE)) { ++ tg3_flag_set(tp, ENABLE_IOV); ++ } else ++#endif ++ tg3_flag_set(tp, ENABLE_RSS); ++ ++ tp->txq_cnt = min(tp->txq_cnt, tp->irq_cnt - 1); ++ if (tp->txq_cnt > 1) ++ tg3_flag_set(tp, ENABLE_TSS); ++ ++#ifdef BCM_HAS_STRUCT_NETDEV_QUEUE ++ netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); ++#endif ++ ++ return true; ++} ++#endif ++ ++static void tg3_ints_init(struct tg3 *tp) ++{ ++#ifdef CONFIG_PCI_MSI ++ if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && ++ !tg3_flag(tp, TAGGED_STATUS)) { ++ /* All MSI supporting chips should support tagged ++ * status. Assert that this is the case. ++ */ ++ netdev_warn(tp->dev, ++ "MSI without TAGGED_STATUS? 
Not using MSI\n");
++		goto defcfg;
++	}
++
++	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
++		tg3_flag_set(tp, USING_MSIX);
++	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
++		tg3_flag_set(tp, USING_MSI);
++
++	tg3_5780_class_intx_workaround(tp);
++
++	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
++		u32 msi_mode = tr32(MSGINT_MODE);
++		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
++			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
++		if (!tg3_flag(tp, 1SHOT_MSI))
++			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
++		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
++	}
++defcfg:
++#endif
++
++	if (!tg3_flag(tp, USING_MSIX)) {
++		tp->irq_cnt = 1;
++		tp->napi[0].irq_vec = tp->pdev->irq;
++	}
++
++	if (tp->irq_cnt == 1) {
++		tp->txq_cnt = 1;
++		tp->rxq_cnt = 1;
++#ifdef BCM_HAS_STRUCT_NETDEV_QUEUE
++		netif_set_real_num_tx_queues(tp->dev, 1);
++		netif_set_real_num_rx_queues(tp->dev, 1);
++#endif
++	}
++}
++
++static void tg3_ints_fini(struct tg3 *tp)
++{
++#ifdef CONFIG_PCI_MSI
++	if (tg3_flag(tp, USING_MSIX))
++		pci_disable_msix(tp->pdev);
++	else if (tg3_flag(tp, USING_MSI))
++		pci_disable_msi(tp->pdev);
++#endif
++	tg3_flag_clear(tp, USING_MSI);
++	tg3_flag_clear(tp, USING_MSIX);
++	tg3_flag_clear(tp, ENABLE_RSS);
++	tg3_flag_clear(tp, ENABLE_TSS);
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	tg3_flag_clear(tp, ENABLE_IOV);
++#endif
++}
++
++static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
++		     bool init)
++{
++	struct net_device *dev = tp->dev;
++	int i, err;
++
++	/*
++	 * Setup interrupts first so we know how
++	 * many NAPI resources to allocate
++	 */
++	tg3_ints_init(tp);
++
++	tg3_rss_check_indir_tbl(tp);
++
++	/* The placement of this call is tied
++	 * to the setup and use of Host TX descriptors.
++	 */
++	err = tg3_alloc_consistent(tp);
++	if (err)
++		goto out_ints_fini;
++
++	tg3_napi_init(tp);
++
++	/* napi is disabled by default after init.
++	 * An assertion may occur when freeing an IRQ vector
++	 * that has NAPI scheduled and associated. Thus,
++	 * we need to ensure napi is disabled prior to
++	 * freeing an irq.
++	 */
++	for (i = 0; i < tp->irq_cnt; i++) {
++		struct tg3_napi *tnapi = &tp->napi[i];
++		err = tg3_request_irq(tp, i);
++		if (err) {
++			for (i--; i >= 0; i--) {
++				tnapi = &tp->napi[i];
++				free_irq(tnapi->irq_vec, tnapi);
++			}
++			goto out_napi_fini;
++		}
++	}
++
++	if (init)
++		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
++
++	tg3_full_lock(tp, 0);
++
++	err = tg3_init_hw(tp, reset_phy);
++	if (err) {
++		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++		tg3_free_rings(tp);
++	}
++
++	tg3_full_unlock(tp);
++
++	if (err)
++		goto out_free_irq;
++
++#ifdef CONFIG_PCI_MSI
++	if (test_irq && tg3_flag(tp, USING_MSI)) {
++		err = tg3_test_msi(tp);
++
++		if (err) {
++			tg3_full_lock(tp, 0);
++			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++			tg3_free_rings(tp);
++			tg3_full_unlock(tp);
++
++			goto out_napi_fini;
++		}
++
++		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
++			u32 val = tr32(PCIE_TRANSACTION_CFG);
++
++			tw32(PCIE_TRANSACTION_CFG,
++			     val | PCIE_TRANS_CFG_1SHOT_MSI);
++		}
++	}
++#endif
++
++	tg3_napi_enable(tp);
++
++	tg3_phy_start(tp);
++
++	tg3_hwmon_open(tp);
++
++	tg3_full_lock(tp, 0);
++
++	tg3_timer_start(tp);
++
++	/* JIRA-20238: This fix makes sure the first heartbeat occurs
++	 * within the 5 second interval even if the jiffy value is very
++	 * high.  In the time_after() check, a high jiffy value becomes
++	 * negative when it is type cast to long on a 32-bit kernel,
++	 * since long is only 4 bytes there.  Seeding the timestamp here
++	 * ensures the time_after() check does not produce an incorrect
++	 * result.
++	 */
++	if (tg3_flag(tp, ENABLE_APE))
++		tp->ape_hb_jiffies = jiffies;
++
++	tg3_flag_set(tp, INIT_COMPLETE);
++	if (init)
++		tg3_ptp_init(tp);
++	else
++		tg3_ptp_resume(tp);
++
++	tg3_enable_ints(tp);
++
++	tg3_full_unlock(tp);
++
++	netif_tx_start_all_queues(dev);
++
++#ifdef BCM_HAS_FIX_FEATURES
++	/*
++	 * Reset loopback feature if it was turned on while the device was
++	 * down, to make sure that it's installed properly now.
++	 */
++	if (dev->features & NETIF_F_LOOPBACK)
++		tg3_set_loopback(dev, dev->features);
++#endif
++
++	return 0;
++
++out_free_irq:
++	for (i = tp->irq_cnt - 1; i >= 0; i--) {
++		struct tg3_napi *tnapi = &tp->napi[i];
++		free_irq(tnapi->irq_vec, tnapi);
++	}
++
++out_napi_fini:
++	tg3_napi_fini(tp);
++	tg3_free_consistent(tp);
++
++out_ints_fini:
++	tg3_ints_fini(tp);
++
++	return err;
++}
++
++static void tg3_stop(struct tg3 *tp)
++{
++	int i;
++
++#if !defined(__VMKLNX__)
++	if (!tp->unrecoverable_err)
++		tg3_reset_task_cancel(tp);
++#else
++	tg3_reset_task_cancel(tp);
++#endif
++
++	tg3_netif_stop(tp);
++
++	tg3_timer_stop(tp);
++
++	tg3_hwmon_close(tp);
++
++	tg3_phy_stop(tp);
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	tg3_netq_invalidate_state(tp);
++#endif
++
++	tg3_full_lock(tp, 1);
++
++	tg3_disable_ints(tp);
++
++	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++	tg3_free_rings(tp);
++	tg3_flag_clear(tp, INIT_COMPLETE);
++
++	tg3_full_unlock(tp);
++
++	/* napi should be disabled after netif_stop already */
++	for (i = tp->irq_cnt - 1; i >= 0; i--) {
++		struct tg3_napi *tnapi = &tp->napi[i];
++		free_irq(tnapi->irq_vec, tnapi);
++	}
++
++	tg3_napi_fini(tp);
++
++	tg3_ints_fini(tp);
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	tg3_netq_stats_clear(tp);
++#endif
++
++	tg3_free_consistent(tp);
++}
++
++static int tg3_open(struct net_device *dev)
++{
++	struct tg3 *tp = netdev_priv(dev);
++	int err;
++
++	if (tp->fw_needed) {
++		err = tg3_request_firmware(tp);
++		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
++			if (err) {
++				netdev_warn(tp->dev, "EEE capability disabled\n");
++				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
++			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
++				netdev_warn(tp->dev, "EEE capability restored\n");
++				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
++			}
++		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
++			if (err)
++				return err;
++		} else if (err) {
++			netdev_warn(tp->dev, "TSO capability disabled\n");
++			tg3_flag_clear(tp, TSO_CAPABLE);
++		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
++			netdev_notice(tp->dev, "TSO capability restored\n");
++			tg3_flag_set(tp, TSO_CAPABLE);
++		}
++	}
++
++	tg3_carrier_off(tp);
++
++	err = tg3_power_up(tp);
++	if (err)
++		return err;
++
++	tg3_full_lock(tp, 0);
++
++	tg3_disable_ints(tp);
++	tg3_flag_clear(tp, INIT_COMPLETE);
++
++	tg3_full_unlock(tp);
++
++	err = tg3_start(tp,
++			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
++			true, true);
++	if (err) {
++		tg3_frob_aux_power(tp, false);
++		pci_set_power_state(tp->pdev, PCI_D3hot);
++	}
++
++#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
++	if (tg3_flag(tp, PTP_CAPABLE)) {
++#ifdef BCM_HAS_PTP_CLOCK_REG_HAS_PARENT
++		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
++						   &tp->pdev->dev);
++#else
++		tp->ptp_clock = ptp_clock_register(&tp->ptp_info);
++#endif
++		if (IS_ERR(tp->ptp_clock))
++			tp->ptp_clock = NULL;
++	}
++#endif
++
++	return err;
++}
++
++static int tg3_close(struct net_device *dev)
++{
++	struct tg3 *tp = netdev_priv(dev);
++
++	tg3_ptp_fini(tp);
++
++	tg3_stop(tp);
++
++	tg3_flag_clear(tp, INIT_COMPLETE);
++
++	/* 
Clear stats across close / open calls */ ++ memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); ++ memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); ++ ++ if (pci_device_is_present(tp->pdev)) { ++ tg3_power_down_prepare(tp); ++ ++ tg3_carrier_off(tp); ++ } ++ return 0; ++} ++ ++static inline u64 get_stat64(tg3_stat64_t *val) ++{ ++ return ((u64)val->high << 32) | ((u64)val->low); ++} ++ ++static u64 tg3_calc_crc_errors(struct tg3 *tp) ++{ ++ struct tg3_hw_stats *hw_stats = tp->hw_stats; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && ++ (tg3_asic_rev(tp) == ASIC_REV_5700 || ++ tg3_asic_rev(tp) == ASIC_REV_5701)) { ++ u32 val; ++ ++ if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { ++ tg3_writephy(tp, MII_TG3_TEST1, ++ val | MII_TG3_TEST1_CRC_EN); ++ tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); ++ } else ++ val = 0; ++ ++ tp->phy_crc_errors += val; ++ ++ return tp->phy_crc_errors; ++ } ++ ++ return get_stat64(&hw_stats->rx_fcs_errors); ++} ++ ++#define ESTAT_ADD(member) \ ++ estats->member = old_estats->member + \ ++ get_stat64(&hw_stats->member) ++ ++static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats) ++{ ++ struct tg3_ethtool_stats *old_estats = &tp->estats_prev; ++ struct tg3_hw_stats *hw_stats = tp->hw_stats; ++ ++ ESTAT_ADD(rx_octets); ++ ESTAT_ADD(rx_fragments); ++ ESTAT_ADD(rx_ucast_packets); ++ ESTAT_ADD(rx_mcast_packets); ++ ESTAT_ADD(rx_bcast_packets); ++ ESTAT_ADD(rx_fcs_errors); ++ ESTAT_ADD(rx_align_errors); ++ ESTAT_ADD(rx_xon_pause_rcvd); ++ ESTAT_ADD(rx_xoff_pause_rcvd); ++ ESTAT_ADD(rx_mac_ctrl_rcvd); ++ ESTAT_ADD(rx_xoff_entered); ++ ESTAT_ADD(rx_frame_too_long_errors); ++ ESTAT_ADD(rx_jabbers); ++ ESTAT_ADD(rx_undersize_packets); ++ ESTAT_ADD(rx_in_length_errors); ++ ESTAT_ADD(rx_out_length_errors); ++ ESTAT_ADD(rx_64_or_less_octet_packets); ++ ESTAT_ADD(rx_65_to_127_octet_packets); ++ ESTAT_ADD(rx_128_to_255_octet_packets); ++ ESTAT_ADD(rx_256_to_511_octet_packets); ++ ESTAT_ADD(rx_512_to_1023_octet_packets); ++ ESTAT_ADD(rx_1024_to_1522_octet_packets); ++ ESTAT_ADD(rx_1523_to_2047_octet_packets); ++ ESTAT_ADD(rx_2048_to_4095_octet_packets); ++ ESTAT_ADD(rx_4096_to_8191_octet_packets); ++ ESTAT_ADD(rx_8192_to_9022_octet_packets); ++ ++ ESTAT_ADD(tx_octets); ++ ESTAT_ADD(tx_collisions); ++ ESTAT_ADD(tx_xon_sent); ++ ESTAT_ADD(tx_xoff_sent); ++ ESTAT_ADD(tx_flow_control); ++ ESTAT_ADD(tx_mac_errors); ++ ESTAT_ADD(tx_single_collisions); ++ ESTAT_ADD(tx_mult_collisions); ++ ESTAT_ADD(tx_deferred); ++ ESTAT_ADD(tx_excessive_collisions); ++ ESTAT_ADD(tx_late_collisions); ++ ESTAT_ADD(tx_collide_2times); ++ ESTAT_ADD(tx_collide_3times); ++ ESTAT_ADD(tx_collide_4times); ++ ESTAT_ADD(tx_collide_5times); ++ ESTAT_ADD(tx_collide_6times); ++ ESTAT_ADD(tx_collide_7times); ++ ESTAT_ADD(tx_collide_8times); ++ ESTAT_ADD(tx_collide_9times); ++ ESTAT_ADD(tx_collide_10times); ++ ESTAT_ADD(tx_collide_11times); ++ ESTAT_ADD(tx_collide_12times); ++ ESTAT_ADD(tx_collide_13times); ++ ESTAT_ADD(tx_collide_14times); ++ ESTAT_ADD(tx_collide_15times); ++ ESTAT_ADD(tx_ucast_packets); ++ ESTAT_ADD(tx_mcast_packets); ++ ESTAT_ADD(tx_bcast_packets); ++ ESTAT_ADD(tx_carrier_sense_errors); ++ ESTAT_ADD(tx_discards); ++ ESTAT_ADD(tx_errors); ++ ++ ESTAT_ADD(dma_writeq_full); ++ ESTAT_ADD(dma_write_prioq_full); ++ ESTAT_ADD(rxbds_empty); ++ ESTAT_ADD(rx_discards); ++ ESTAT_ADD(rx_errors); ++ ESTAT_ADD(rx_threshold_hit); ++ ++ ESTAT_ADD(dma_readq_full); ++ ESTAT_ADD(dma_read_prioq_full); ++ ESTAT_ADD(tx_comp_queue_full); ++ ++ ESTAT_ADD(ring_set_send_prod_index); ++ 
ESTAT_ADD(ring_status_update); ++ ESTAT_ADD(nic_irqs); ++ ESTAT_ADD(nic_avoided_irqs); ++ ESTAT_ADD(nic_tx_threshold_hit); ++ ++ ESTAT_ADD(mbuf_lwm_thresh_hit); ++ estats->dma_4g_cross = tp->dma_4g_cross; ++#if !defined(__VMKLNX__) ++ estats->recoverable_err = tp->recoverable_err; ++ estats->unrecoverable_err = tp->unrecoverable_err; ++#endif ++} ++ ++static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) ++{ ++ struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; ++ struct tg3_hw_stats *hw_stats = tp->hw_stats; ++ ++ stats->rx_packets = old_stats->rx_packets + ++ get_stat64(&hw_stats->rx_ucast_packets) + ++ get_stat64(&hw_stats->rx_mcast_packets) + ++ get_stat64(&hw_stats->rx_bcast_packets); ++ ++ stats->tx_packets = old_stats->tx_packets + ++ get_stat64(&hw_stats->tx_ucast_packets) + ++ get_stat64(&hw_stats->tx_mcast_packets) + ++ get_stat64(&hw_stats->tx_bcast_packets); ++ ++ stats->rx_bytes = old_stats->rx_bytes + ++ get_stat64(&hw_stats->rx_octets); ++ stats->tx_bytes = old_stats->tx_bytes + ++ get_stat64(&hw_stats->tx_octets); ++ ++ stats->rx_errors = old_stats->rx_errors + ++ get_stat64(&hw_stats->rx_errors); ++ stats->tx_errors = old_stats->tx_errors + ++ get_stat64(&hw_stats->tx_errors) + ++ get_stat64(&hw_stats->tx_mac_errors) + ++ get_stat64(&hw_stats->tx_carrier_sense_errors) + ++ get_stat64(&hw_stats->tx_discards); ++ ++ stats->multicast = old_stats->multicast + ++ get_stat64(&hw_stats->rx_mcast_packets); ++ stats->collisions = old_stats->collisions + ++ get_stat64(&hw_stats->tx_collisions); ++ ++ stats->rx_length_errors = old_stats->rx_length_errors + ++ get_stat64(&hw_stats->rx_frame_too_long_errors) + ++ get_stat64(&hw_stats->rx_undersize_packets); ++ ++ stats->rx_frame_errors = old_stats->rx_frame_errors + ++ get_stat64(&hw_stats->rx_align_errors); ++ stats->tx_aborted_errors = old_stats->tx_aborted_errors + ++ get_stat64(&hw_stats->tx_discards); ++ stats->tx_carrier_errors = old_stats->tx_carrier_errors + ++ get_stat64(&hw_stats->tx_carrier_sense_errors); ++ ++ stats->rx_crc_errors = old_stats->rx_crc_errors + ++ tg3_calc_crc_errors(tp); ++ ++ stats->rx_missed_errors = old_stats->rx_missed_errors + ++ get_stat64(&hw_stats->rx_discards); ++ ++ stats->rx_dropped = tp->rx_dropped; ++ stats->tx_dropped = tp->tx_dropped; ++} ++ ++static int tg3_get_regs_len(struct net_device *dev) ++{ ++ return TG3_REG_BLK_SIZE; ++} ++ ++static void tg3_get_regs(struct net_device *dev, ++ struct ethtool_regs *regs, void *_p) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ regs->version = 0; ++ ++ memset(_p, 0, TG3_REG_BLK_SIZE); ++ ++ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ++ return; ++ ++ tg3_full_lock(tp, 0); ++ ++ tg3_dump_legacy_regs(tp, (u32 *)_p); ++ ++ tg3_full_unlock(tp); ++} ++ ++#if (LINUX_VERSION_CODE >= 0x20418) ++static int tg3_get_eeprom_len(struct net_device *dev) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ return tp->nvram_size; ++} ++#endif ++ ++#ifdef ETHTOOL_GEEPROM ++static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ int ret, cpmu_restore = 0; ++ u8 *pd; ++ u32 i, offset, len, b_offset, b_count, cpmu_val = 0; ++ __be32 val; ++ ++ if (tg3_flag(tp, NO_NVRAM)) ++ return -EINVAL; ++ ++ offset = eeprom->offset; ++ len = eeprom->len; ++ eeprom->len = 0; ++ ++ eeprom->magic = TG3_EEPROM_MAGIC; ++ ++ /* Override clock, link aware and link idle modes */ ++ if (tg3_flag(tp, CPMU_PRESENT)) { ++ cpmu_val = tr32(TG3_CPMU_CTRL); ++ if (cpmu_val & 
(CPMU_CTRL_LINK_AWARE_MODE | ++ CPMU_CTRL_LINK_IDLE_MODE)) { ++ tw32(TG3_CPMU_CTRL, cpmu_val & ++ ~(CPMU_CTRL_LINK_AWARE_MODE | ++ CPMU_CTRL_LINK_IDLE_MODE)); ++ cpmu_restore = 1; ++ } ++ } ++ tg3_override_clk(tp); ++ ++ if (offset & 3) { ++ /* adjustments to start on required 4 byte boundary */ ++ b_offset = offset & 3; ++ b_count = 4 - b_offset; ++ if (b_count > len) { ++ /* i.e. offset=1 len=2 */ ++ b_count = len; ++ } ++ ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); ++ if (ret) ++ goto eeprom_done; ++ memcpy(data, ((char *)&val) + b_offset, b_count); ++ len -= b_count; ++ offset += b_count; ++ eeprom->len += b_count; ++ } ++ ++ /* read bytes up to the last 4 byte boundary */ ++ pd = &data[eeprom->len]; ++ for (i = 0; i < (len - (len & 3)); i += 4) { ++ ret = tg3_nvram_read_be32(tp, offset + i, &val); ++ if (ret) { ++ if (i) ++ i -= 4; ++ eeprom->len += i; ++ goto eeprom_done; ++ } ++ memcpy(pd + i, &val, 4); ++ if (need_resched()) { ++ if (signal_pending(current)) { ++ eeprom->len += i; ++ ret = -EINTR; ++ goto eeprom_done; ++ } ++ cond_resched(); ++ } ++ } ++ eeprom->len += i; ++ ++ if (len & 3) { ++ /* read last bytes not ending on 4 byte boundary */ ++ pd = &data[eeprom->len]; ++ b_count = len & 3; ++ b_offset = offset + len - b_count; ++ ret = tg3_nvram_read_be32(tp, b_offset, &val); ++ if (ret) ++ goto eeprom_done; ++ memcpy(pd, &val, b_count); ++ eeprom->len += b_count; ++ } ++ ret = 0; ++ ++eeprom_done: ++ /* Restore clock, link aware and link idle modes */ ++ tg3_restore_clk(tp); ++ if (cpmu_restore) ++ tw32(TG3_CPMU_CTRL, cpmu_val); ++ ++ return ret; ++} ++#endif ++ ++#ifdef ETHTOOL_SEEPROM ++static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ int ret; ++ u32 offset, len, b_offset, odd_len; ++ u8 *buf; ++ __be32 start, end; ++ ++ if (tg3_flag(tp, NO_NVRAM) || ++ eeprom->magic != TG3_EEPROM_MAGIC) ++ return -EINVAL; ++ ++ offset = eeprom->offset; ++ len = eeprom->len; ++ ++ if ((b_offset = (offset & 3))) { ++ /* adjustments to start on required 4 byte boundary */ ++ ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); ++ if (ret) ++ return ret; ++ len += b_offset; ++ offset &= ~3; ++ if (len < 4) ++ len = 4; ++ } ++ ++ odd_len = 0; ++ if (len & 3) { ++ /* adjustments to end on required 4 byte boundary */ ++ odd_len = 1; ++ len = (len + 3) & ~3; ++ ret = tg3_nvram_read_be32(tp, offset+len-4, &end); ++ if (ret) ++ return ret; ++ } ++ ++ buf = data; ++ if (b_offset || odd_len) { ++ buf = kmalloc(len, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ if (b_offset) ++ memcpy(buf, &start, 4); ++ if (odd_len) ++ memcpy(buf+len-4, &end, 4); ++ memcpy(buf + b_offset, data, eeprom->len); ++ } ++ ++ ret = tg3_nvram_write_block(tp, offset, len, buf); ++ ++ if (buf != data) ++ kfree(buf); ++ ++ return ret; ++} ++#endif ++ ++static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_flag(tp, USE_PHYLIB)) { ++ struct phy_device *phydev; ++ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) ++ return -EAGAIN; ++ phydev = tp->mdio_bus->phy_map[tp->phy_addr]; ++ return phy_ethtool_gset(phydev, cmd); ++ } ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++ ++ cmd->supported = (SUPPORTED_Autoneg); ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) ++ cmd->supported |= (SUPPORTED_1000baseT_Half | ++ SUPPORTED_1000baseT_Full); ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { ++ cmd->supported |= 
(SUPPORTED_100baseT_Half | ++ SUPPORTED_100baseT_Full | ++ SUPPORTED_10baseT_Half | ++ SUPPORTED_10baseT_Full | ++ SUPPORTED_TP); ++ cmd->port = PORT_TP; ++ } else { ++ cmd->supported |= SUPPORTED_FIBRE; ++ cmd->port = PORT_FIBRE; ++ } ++ ++ cmd->advertising = tp->link_config.advertising; ++ if (tg3_flag(tp, PAUSE_AUTONEG)) { ++ if (tp->link_config.flowctrl & FLOW_CTRL_RX) { ++ if (tp->link_config.flowctrl & FLOW_CTRL_TX) { ++ cmd->advertising |= ADVERTISED_Pause; ++ } else { ++ cmd->advertising |= ADVERTISED_Pause | ++ ADVERTISED_Asym_Pause; ++ } ++ } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { ++ cmd->advertising |= ADVERTISED_Asym_Pause; ++ } ++ } ++ if (netif_running(dev) && tp->link_up) { ++ ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); ++ cmd->duplex = tp->link_config.active_duplex; ++#ifdef BCM_HAS_LP_ADVERTISING ++ cmd->lp_advertising = tp->link_config.rmt_adv; ++#endif /* BCM_HAS_LP_ADVERTISING */ ++#ifdef BCM_HAS_MDIX_STATUS ++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { ++ if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) ++ cmd->eth_tp_mdix = ETH_TP_MDI_X; ++ else ++ cmd->eth_tp_mdix = ETH_TP_MDI; ++ } ++#endif /* BCM_HAS_MDIX_STATUS */ ++ } else { ++ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); ++ cmd->duplex = DUPLEX_UNKNOWN; ++#ifdef BCM_HAS_MDIX_STATUS ++ cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; ++#endif /* BCM_HAS_MDIX_STATUS */ ++ } ++ cmd->phy_address = tp->phy_addr; ++ cmd->transceiver = XCVR_INTERNAL; ++ cmd->autoneg = tp->link_config.autoneg; ++ cmd->maxtxpkt = 0; ++ cmd->maxrxpkt = 0; ++ return 0; ++} ++ ++static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ u32 speed = ethtool_cmd_speed(cmd); ++ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_flag(tp, USE_PHYLIB)) { ++ struct phy_device *phydev; ++ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) ++ return -EAGAIN; ++ phydev = tp->mdio_bus->phy_map[tp->phy_addr]; ++ return phy_ethtool_sset(phydev, cmd); ++ } ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++ ++ if (cmd->autoneg != AUTONEG_ENABLE && ++ cmd->autoneg != AUTONEG_DISABLE) ++ return -EINVAL; ++ ++ if (cmd->autoneg == AUTONEG_DISABLE && ++ cmd->duplex != DUPLEX_FULL && ++ cmd->duplex != DUPLEX_HALF) ++ return -EINVAL; ++ ++ if (cmd->autoneg == AUTONEG_ENABLE) { ++ u32 mask = ADVERTISED_Autoneg | ++ ADVERTISED_Pause | ++ ADVERTISED_Asym_Pause; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) ++ mask |= ADVERTISED_1000baseT_Half | ++ ADVERTISED_1000baseT_Full; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) ++ mask |= ADVERTISED_100baseT_Half | ++ ADVERTISED_100baseT_Full | ++ ADVERTISED_10baseT_Half | ++ ADVERTISED_10baseT_Full | ++ ADVERTISED_TP; ++ else ++ mask |= ADVERTISED_FIBRE; ++ ++ if (cmd->advertising & ~mask) ++ return -EINVAL; ++ ++ mask &= (ADVERTISED_1000baseT_Half | ++ ADVERTISED_1000baseT_Full | ++ ADVERTISED_100baseT_Half | ++ ADVERTISED_100baseT_Full | ++ ADVERTISED_10baseT_Half | ++ ADVERTISED_10baseT_Full); ++ ++ cmd->advertising &= mask; ++ } else { ++ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { ++ if (speed != SPEED_1000) ++ return -EINVAL; ++ ++ if (cmd->duplex != DUPLEX_FULL) ++ return -EINVAL; ++ } else { ++ if (speed != SPEED_100 && ++ speed != SPEED_10) ++ return -EINVAL; ++ } ++ } ++ ++ tg3_full_lock(tp, 0); ++ ++ tp->link_config.autoneg = cmd->autoneg; ++ if (cmd->autoneg == AUTONEG_ENABLE) { ++ tp->link_config.advertising = (cmd->advertising | ++ ADVERTISED_Autoneg); ++ tp->link_config.speed = SPEED_UNKNOWN; ++ tp->link_config.duplex = 
DUPLEX_UNKNOWN; ++ } else { ++ tp->link_config.advertising = 0; ++ tp->link_config.speed = speed; ++ tp->link_config.duplex = cmd->duplex; ++ } ++ ++ tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; ++ ++ tg3_warn_mgmt_link_flap(tp); ++ ++ if (netif_running(dev)) ++ tg3_setup_phy(tp, true); ++ ++ tg3_full_unlock(tp); ++ ++ return 0; ++} ++ ++static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); ++ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); ++ strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); ++ strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); ++} ++ ++static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) ++ wol->supported = WAKE_MAGIC; ++ else ++ wol->supported = 0; ++ wol->wolopts = 0; ++ if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) ++ wol->wolopts = WAKE_MAGIC; ++ memset(&wol->sopass, 0, sizeof(wol->sopass)); ++} ++ ++static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++#ifdef BCM_HAS_DEVICE_WAKEUP_API ++ struct device *dp = &tp->pdev->dev; ++#endif ++ ++ if (wol->wolopts & ~WAKE_MAGIC) ++ return -EINVAL; ++ if ((wol->wolopts & WAKE_MAGIC) && ++ !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) ++ return -EINVAL; ++ ++ device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); ++ ++ if (wol->wolopts & WAKE_MAGIC) ++ tg3_flag_set(tp, WOL_ENABLE); ++ else ++ tg3_flag_clear(tp, WOL_ENABLE); ++ ++ return 0; ++} ++ ++static u32 tg3_get_msglevel(struct net_device *dev) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ return tp->msg_enable; ++} ++ ++static void tg3_set_msglevel(struct net_device *dev, u32 value) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ tp->msg_enable = value; ++} ++ ++static int tg3_nway_reset(struct net_device *dev) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ int r; ++ ++ if (!netif_running(dev)) ++ return -EAGAIN; ++ ++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ++ return -EINVAL; ++ ++ tg3_warn_mgmt_link_flap(tp); ++ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_flag(tp, USE_PHYLIB)) { ++ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) ++ return -EAGAIN; ++ r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]); ++ } else ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++ { ++ u32 bmcr; ++ ++ spin_lock_bh(&tp->lock); ++ r = -EINVAL; ++ tg3_readphy(tp, MII_BMCR, &bmcr); ++ if (!tg3_readphy(tp, MII_BMCR, &bmcr) && ++ ((bmcr & BMCR_ANENABLE) || ++ (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { ++ tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | ++ BMCR_ANENABLE); ++ r = 0; ++ } ++ spin_unlock_bh(&tp->lock); ++ } ++ ++ return r; ++} ++ ++static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ ering->rx_max_pending = tp->rx_std_ring_mask; ++ ering->rx_mini_max_pending = 0; ++ if (tg3_flag(tp, JUMBO_RING_ENABLE)) ++ ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; ++ else ++ ering->rx_jumbo_max_pending = 0; ++ ++ ering->tx_max_pending = TG3_TX_RING_SIZE - 1; ++ ++ ering->rx_pending = tp->rx_pending; ++ ering->rx_mini_pending = 0; ++ if (tg3_flag(tp, JUMBO_RING_ENABLE)) ++ ering->rx_jumbo_pending = tp->rx_jumbo_pending; ++ else ++ ering->rx_jumbo_pending = 0; ++ ++ ering->tx_pending = 
tp->napi[0].tx_pending; ++} ++ ++static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ int i, irq_sync = 0, err = 0; ++ ++ if (!ering->rx_pending || (ering->rx_pending > tp->rx_std_ring_mask) || ++ (tg3_flag(tp, JUMBO_RING_ENABLE) && !ering->rx_jumbo_pending) || ++ (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || ++ (ering->tx_pending > TG3_TX_RING_SIZE - 1) || ++ (ering->tx_pending <= MAX_SKB_FRAGS) || ++ (tg3_flag(tp, TSO_BUG) && ++ (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) ++ return -EINVAL; ++ ++ if (netif_running(dev)) { ++ tg3_phy_stop(tp); ++ tg3_netif_stop(tp); ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ tg3_netq_invalidate_state(tp); ++#endif ++ irq_sync = 1; ++ } ++ ++ tg3_full_lock(tp, irq_sync); ++ ++ tp->rx_pending = ering->rx_pending; ++ ++ if (tg3_flag(tp, MAX_RXPEND_64) && ++ tp->rx_pending > 63) ++ tp->rx_pending = 63; ++ tp->rx_jumbo_pending = ering->rx_jumbo_pending; ++ ++ for (i = 0; i < tp->irq_max; i++) ++ tp->napi[i].tx_pending = ering->tx_pending; ++ ++ if (netif_running(dev)) { ++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); ++ err = tg3_restart_hw(tp, false); ++ if (!err) ++ tg3_netif_start(tp); ++ } ++ ++ tg3_full_unlock(tp); ++ ++ if (irq_sync && !err) ++ tg3_phy_start(tp); ++ ++ return err; ++} ++ ++static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); ++ ++ if (tp->link_config.flowctrl & FLOW_CTRL_RX) ++ epause->rx_pause = 1; ++ else ++ epause->rx_pause = 0; ++ ++ if (tp->link_config.flowctrl & FLOW_CTRL_TX) ++ epause->tx_pause = 1; ++ else ++ epause->tx_pause = 0; ++} ++ ++static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ int err = 0; ++ ++ if (tp->link_config.autoneg == AUTONEG_ENABLE) ++ tg3_warn_mgmt_link_flap(tp); ++ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_flag(tp, USE_PHYLIB)) { ++ u32 newadv; ++ struct phy_device *phydev; ++ ++ phydev = tp->mdio_bus->phy_map[tp->phy_addr]; ++ ++ if (!(phydev->supported & SUPPORTED_Pause) || ++ (!(phydev->supported & SUPPORTED_Asym_Pause) && ++ (epause->rx_pause != epause->tx_pause))) ++ return -EINVAL; ++ ++ tp->link_config.flowctrl = 0; ++ if (epause->rx_pause) { ++ tp->link_config.flowctrl |= FLOW_CTRL_RX; ++ ++ if (epause->tx_pause) { ++ tp->link_config.flowctrl |= FLOW_CTRL_TX; ++ newadv = ADVERTISED_Pause; ++ } else ++ newadv = ADVERTISED_Pause | ++ ADVERTISED_Asym_Pause; ++ } else if (epause->tx_pause) { ++ tp->link_config.flowctrl |= FLOW_CTRL_TX; ++ newadv = ADVERTISED_Asym_Pause; ++ } else ++ newadv = 0; ++ ++ if (epause->autoneg) ++ tg3_flag_set(tp, PAUSE_AUTONEG); ++ else ++ tg3_flag_clear(tp, PAUSE_AUTONEG); ++ ++ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { ++ u32 oldadv = phydev->advertising & ++ (ADVERTISED_Pause | ADVERTISED_Asym_Pause); ++ if (oldadv != newadv) { ++ phydev->advertising &= ++ ~(ADVERTISED_Pause | ++ ADVERTISED_Asym_Pause); ++ phydev->advertising |= newadv; ++ if (phydev->autoneg) { ++ /* ++ * Always renegotiate the link to ++ * inform our link partner of our ++ * flow control settings, even if the ++ * flow control is forced. Let ++ * tg3_adjust_link() do the final ++ * flow control setup. 
++ */ ++ return phy_start_aneg(phydev); ++ } ++ } ++ ++ if (!epause->autoneg) ++ tg3_setup_flow_control(tp, 0, 0); ++ } else { ++ tp->link_config.advertising &= ++ ~(ADVERTISED_Pause | ++ ADVERTISED_Asym_Pause); ++ tp->link_config.advertising |= newadv; ++ } ++ } else ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++ { ++ int irq_sync = 0; ++ ++ if (netif_running(dev)) { ++ tg3_netif_stop(tp); ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ tg3_netq_invalidate_state(tp); ++#endif ++ irq_sync = 1; ++ } ++ ++ tg3_full_lock(tp, irq_sync); ++ ++ if (epause->autoneg) ++ tg3_flag_set(tp, PAUSE_AUTONEG); ++ else ++ tg3_flag_clear(tp, PAUSE_AUTONEG); ++ if (epause->rx_pause) ++ tp->link_config.flowctrl |= FLOW_CTRL_RX; ++ else ++ tp->link_config.flowctrl &= ~FLOW_CTRL_RX; ++ if (epause->tx_pause) ++ tp->link_config.flowctrl |= FLOW_CTRL_TX; ++ else ++ tp->link_config.flowctrl &= ~FLOW_CTRL_TX; ++ ++ if (netif_running(dev)) { ++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); ++ err = tg3_restart_hw(tp, false); ++ if (!err) ++ tg3_netif_start(tp); ++ } ++ ++ tg3_full_unlock(tp); ++ } ++ ++ tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; ++ ++ return err; ++} ++ ++static int tg3_get_sset_count(struct net_device *dev, int sset) ++{ ++ switch (sset) { ++ case ETH_SS_TEST: ++ return TG3_NUM_TEST; ++ case ETH_SS_STATS: ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ return tg3_netq_stats_size(netdev_priv(dev)); ++#else ++ return TG3_NUM_STATS; ++#endif ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++#if (LINUX_VERSION_CODE < 0x020618) ++static int tg3_get_stats_count (struct net_device *dev) ++{ ++ return tg3_get_sset_count(dev, ETH_SS_STATS); ++} ++ ++static int tg3_get_test_count (struct net_device *dev) ++{ ++ return tg3_get_sset_count(dev, ETH_SS_TEST); ++} ++#endif ++ ++#if defined(BCM_HAS_GET_RXNFC) && !defined(GET_ETHTOOL_OP_EXT) ++#ifdef BCM_HAS_OLD_GET_RXNFC_SIG ++static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, ++ void *rules) ++#else ++static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, ++ u32 *rules __always_unused) ++#endif /* BCM_HAS_OLD_GET_RXNFC_SIG */ ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ if (!tg3_flag(tp, SUPPORT_MSIX)) ++ return -EOPNOTSUPP; ++ ++ switch (info->cmd) { ++ case ETHTOOL_GRXRINGS: ++ if (netif_running(tp->dev)) ++ info->data = tp->rxq_cnt; ++ else { ++ info->data = num_online_cpus(); ++ if (info->data > TG3_RSS_MAX_NUM_QS) ++ info->data = TG3_RSS_MAX_NUM_QS; ++ } ++ return 0; ++ ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++#endif /* BCM_HAS_GET_RXNFC */ ++ ++#if defined(BCM_HAS_GET_RXFH_INDIR_SIZE) && !defined(GET_ETHTOOL_OP_EXT) ++static u32 tg3_get_rxfh_indir_size(struct net_device *dev) ++{ ++ u32 size = 0; ++ struct tg3 *tp = netdev_priv(dev); ++ ++ if (tg3_flag(tp, SUPPORT_MSIX)) ++ size = TG3_RSS_INDIR_TBL_SIZE; ++ ++ return size; ++} ++ ++#ifdef BCM_HAS_OLD_RXFH_INDIR ++static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir) ++#else ++static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key) ++#endif ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ int i; ++ ++ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) ++ indir[i] = tp->rss_ind_tbl[i]; ++ ++ return 0; ++} ++#ifdef BCM_HAS_OLD_RXFH_INDIR ++static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir) ++#else ++static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key) ++#endif ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ size_t i; ++ ++ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) ++ tp->rss_ind_tbl[i] = indir[i]; ++ ++ if (!netif_running(dev) 
|| !tg3_flag(tp, ENABLE_RSS))
++		return 0;
++
++	/* It is legal to write the indirection
++	 * table while the device is running.
++	 */
++	tg3_full_lock(tp, 0);
++	tg3_rss_write_indir_tbl(tp);
++	tg3_full_unlock(tp);
++
++	return 0;
++}
++#endif /* BCM_HAS_GET_RXFH_INDIR_SIZE */
++
++#if defined(ETHTOOL_GCHANNELS)
++static void tg3_get_channels(struct net_device *dev,
++			     struct ethtool_channels *channel)
++{
++	struct tg3 *tp = netdev_priv(dev);
++	u32 deflt_qs = netif_get_num_default_rss_queues();
++
++	channel->max_rx = tp->rxq_max;
++	channel->max_tx = tp->txq_max;
++
++	if (netif_running(dev)) {
++		channel->rx_count = tp->rxq_cnt;
++		channel->tx_count = tp->txq_cnt;
++	} else {
++		if (tp->rxq_req)
++			channel->rx_count = tp->rxq_req;
++		else
++			channel->rx_count = min(deflt_qs, tp->rxq_max);
++
++		if (tp->txq_req)
++			channel->tx_count = tp->txq_req;
++		else
++			channel->tx_count = min(deflt_qs, tp->txq_max);
++	}
++}
++
++static int tg3_set_channels(struct net_device *dev,
++			    struct ethtool_channels *channel)
++{
++	struct tg3 *tp = netdev_priv(dev);
++
++	if (!tg3_flag(tp, SUPPORT_MSIX))
++		return -EOPNOTSUPP;
++
++	if (channel->rx_count > tp->rxq_max ||
++	    channel->tx_count > tp->txq_max)
++		return -EINVAL;
++
++	tp->rxq_req = channel->rx_count;
++	tp->txq_req = channel->tx_count;
++
++	if (!netif_running(dev))
++		return 0;
++
++	tg3_stop(tp);
++
++	tg3_carrier_off(tp);
++
++	tg3_start(tp, true, false, false);
++
++	return 0;
++}
++#endif /* ETHTOOL_GCHANNELS */
++
++static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
++{
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	struct tg3 *tp = netdev_priv(dev);
++#endif
++
++	switch (stringset) {
++	case ETH_SS_STATS:
++		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
++#ifdef TG3_VMWARE_NETQ_ENABLE
++		if (tg3_flag(tp, ENABLE_IOV)) {
++			buf += sizeof(ethtool_stats_keys);
++			tg3_netq_stats_get_strings(tp, buf);
++		}
++#endif
++		break;
++	case ETH_SS_TEST:
++		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
++		break;
++	default:
++		WARN_ON(1);	/* we need a WARN() */
++		break;
++	}
++}
++
++static int tg3_set_phys_id(struct net_device *dev,
++			   enum ethtool_phys_id_state state)
++{
++	struct tg3 *tp = netdev_priv(dev);
++
++	if (!netif_running(tp->dev))
++		return -EAGAIN;
++
++	switch (state) {
++	case ETHTOOL_ID_ACTIVE:
++		return 1;	/* cycle on/off once per second */
++
++	case ETHTOOL_ID_ON:
++		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
++		     LED_CTRL_1000MBPS_ON |
++		     LED_CTRL_100MBPS_ON |
++		     LED_CTRL_10MBPS_ON |
++		     LED_CTRL_TRAFFIC_OVERRIDE |
++		     LED_CTRL_TRAFFIC_BLINK |
++		     LED_CTRL_TRAFFIC_LED);
++		break;
++
++	case ETHTOOL_ID_OFF:
++		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
++		     LED_CTRL_TRAFFIC_OVERRIDE);
++		break;
++
++	case ETHTOOL_ID_INACTIVE:
++		tw32(MAC_LED_CTRL, tp->led_ctrl);
++		break;
++	}
++
++	return 0;
++}
++
++static void tg3_get_ethtool_stats(struct net_device *dev,
++				  struct ethtool_stats *estats, u64 *tmp_stats)
++{
++	struct tg3 *tp = netdev_priv(dev);
++
++	if (tp->hw_stats) {
++		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
++	}
++	else {
++		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
++#if !defined(__VMKLNX__)
++		((struct tg3_ethtool_stats *)tmp_stats)->unrecoverable_err =
++			tp->unrecoverable_err;
++		((struct tg3_ethtool_stats *)tmp_stats)->recoverable_err =
++			tp->recoverable_err;
++#endif
++	}
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	tg3_netq_stats_get(tp, tmp_stats + TG3_NUM_STATS);
++#endif
++}
++
++static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
++{
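++	/* Read the board's Vital Product Data block.  The data normally
++	 * lives in NVRAM, optionally behind an "extended VPD" directory
++	 * entry; if no usable NVRAM image is found, fall back to the PCI
++	 * VPD capability via pci_read_vpd().  On success, returns a
++	 * kmalloc'ed buffer (the caller must kfree() it) and stores its
++	 * length in *vpdlen; returns NULL on failure.
++	 */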
++ int i; ++ __be32 *buf; ++ u32 offset = 0, len = 0; ++ u32 magic, val; ++ ++ if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) ++ return NULL; ++ ++ if (magic == TG3_EEPROM_MAGIC) { ++ for (offset = TG3_NVM_DIR_START; ++ offset < TG3_NVM_DIR_END; ++ offset += TG3_NVM_DIRENT_SIZE) { ++ if (tg3_nvram_read(tp, offset, &val)) ++ return NULL; ++ ++ if ((val >> TG3_NVM_DIRTYPE_SHIFT) == ++ TG3_NVM_DIRTYPE_EXTVPD) ++ break; ++ } ++ ++ if (offset != TG3_NVM_DIR_END) { ++ len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; ++ if (tg3_nvram_read(tp, offset + 4, &offset)) ++ return NULL; ++ ++ offset = tg3_nvram_logical_addr(tp, offset); ++ } ++ } ++ ++ if (!offset || !len) { ++ offset = TG3_NVM_VPD_OFF; ++ len = TG3_NVM_VPD_LEN; ++ } ++ ++ buf = kmalloc(len, GFP_KERNEL); ++ if (buf == NULL) ++ return NULL; ++ ++ if (magic == TG3_EEPROM_MAGIC) { ++ for (i = 0; i < len; i += 4) { ++ /* The data is in little-endian format in NVRAM. ++ * Use the big-endian read routines to preserve ++ * the byte order as it exists in NVRAM. ++ */ ++ if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) ++ goto error; ++ } ++ } else { ++ u8 *ptr; ++ ssize_t cnt; ++ unsigned int pos = 0; ++ ++ ptr = (u8 *)&buf[0]; ++ for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) { ++ cnt = pci_read_vpd(tp->pdev, pos, ++ len - pos, ptr); ++ if (cnt == -ETIMEDOUT || cnt == -EINTR) ++ cnt = 0; ++ else if (cnt < 0) ++ goto error; ++ } ++ if (pos != len) ++ goto error; ++ } ++ ++ *vpdlen = len; ++ ++ return buf; ++ ++error: ++ kfree(buf); ++ return NULL; ++} ++ ++#define NVRAM_TEST_SIZE 0x100 ++#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 ++#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 ++#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c ++#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 ++#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 ++#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 ++#define NVRAM_SELFBOOT_HW_SIZE 0x20 ++#define NVRAM_SELFBOOT_DATA_SIZE 0x1c ++ ++static int tg3_test_nvram(struct tg3 *tp) ++{ ++ u32 csum, magic, len; ++ __be32 *buf; ++ int i, j, k, err = 0, size; ++ ++ if (tg3_flag(tp, NO_NVRAM)) ++ return 0; ++ ++ if (tg3_nvram_read(tp, 0, &magic) != 0) ++ return -EIO; ++ ++ if (magic == TG3_EEPROM_MAGIC) ++ size = NVRAM_TEST_SIZE; ++ else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { ++ if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == ++ TG3_EEPROM_SB_FORMAT_1) { ++ switch (magic & TG3_EEPROM_SB_REVISION_MASK) { ++ case TG3_EEPROM_SB_REVISION_0: ++ size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; ++ break; ++ case TG3_EEPROM_SB_REVISION_2: ++ size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; ++ break; ++ case TG3_EEPROM_SB_REVISION_3: ++ size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; ++ break; ++ case TG3_EEPROM_SB_REVISION_4: ++ size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; ++ break; ++ case TG3_EEPROM_SB_REVISION_5: ++ size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; ++ break; ++ case TG3_EEPROM_SB_REVISION_6: ++ size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; ++ break; ++ default: ++ return -EIO; ++ } ++ } else ++ return 0; ++ } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) ++ size = NVRAM_SELFBOOT_HW_SIZE; ++ else ++ return -EIO; ++ ++ buf = kmalloc(size, GFP_KERNEL); ++ if (buf == NULL) ++ return -ENOMEM; ++ ++ err = -EIO; ++ for (i = 0, j = 0; i < size; i += 4, j++) { ++ err = tg3_nvram_read_be32(tp, i, &buf[j]); ++ if (err) ++ break; ++ } ++ if (i < size) ++ goto out; ++ ++ /* Selfboot format */ ++ magic = be32_to_cpu(buf[0]); ++ if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == ++ TG3_EEPROM_MAGIC_FW) { ++ u8 *buf8 = (u8 *) buf, csum8 = 0; ++ ++ if ((magic & 
TG3_EEPROM_SB_REVISION_MASK) == ++ TG3_EEPROM_SB_REVISION_2) { ++ /* For rev 2, the csum doesn't include the MBA. */ ++ for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) ++ csum8 += buf8[i]; ++ for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) ++ csum8 += buf8[i]; ++ } else { ++ for (i = 0; i < size; i++) ++ csum8 += buf8[i]; ++ } ++ ++ if (csum8 == 0) { ++ err = 0; ++ goto out; ++ } ++ ++ err = -EIO; ++ goto out; ++ } ++ ++ if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == ++ TG3_EEPROM_MAGIC_HW) { ++ u8 data[NVRAM_SELFBOOT_DATA_SIZE]; ++ u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; ++ u8 *buf8 = (u8 *) buf; ++ ++ /* Separate the parity bits and the data bytes. */ ++ for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { ++ if ((i == 0) || (i == 8)) { ++ int l; ++ u8 msk; ++ ++ for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) ++ parity[k++] = buf8[i] & msk; ++ i++; ++ } else if (i == 16) { ++ int l; ++ u8 msk; ++ ++ for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) ++ parity[k++] = buf8[i] & msk; ++ i++; ++ ++ for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) ++ parity[k++] = buf8[i] & msk; ++ i++; ++ } ++ data[j++] = buf8[i]; ++ } ++ ++ err = -EIO; ++ for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { ++ u8 hw8 = hweight8(data[i]); ++ ++ if ((hw8 & 0x1) && parity[i]) ++ goto out; ++ else if (!(hw8 & 0x1) && !parity[i]) ++ goto out; ++ } ++ err = 0; ++ goto out; ++ } ++ ++ err = -EIO; ++ ++ /* Bootstrap checksum at offset 0x10 */ ++ csum = calc_crc((unsigned char *) buf, 0x10); ++ if (csum != le32_to_cpu(buf[0x10/4])) ++ goto out; ++ ++ /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ ++ csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); ++ if (csum != le32_to_cpu(buf[0xfc/4])) ++ goto out; ++ ++ kfree(buf); ++ ++ buf = tg3_vpd_readblock(tp, &len); ++ if (!buf) ++ return -ENOMEM; ++ ++ i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA); ++ if (i > 0) { ++ j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); ++ if (j < 0) ++ goto out; ++ ++ if (i + PCI_VPD_LRDT_TAG_SIZE + j > len) ++ goto out; ++ ++ i += PCI_VPD_LRDT_TAG_SIZE; ++ j = pci_vpd_find_info_keyword((u8 *)buf, i, j, ++ PCI_VPD_RO_KEYWORD_CHKSUM); ++ if (j > 0) { ++ u8 csum8 = 0; ++ ++ j += PCI_VPD_INFO_FLD_HDR_SIZE; ++ ++ for (i = 0; i <= j; i++) ++ csum8 += ((u8 *)buf)[i]; ++ ++ if (csum8) ++ goto out; ++ } ++ } ++ ++ err = 0; ++ ++out: ++ kfree(buf); ++ return err; ++} ++ ++#define TG3_SERDES_TIMEOUT_SEC 2 ++#define TG3_COPPER_TIMEOUT_SEC 7 ++ ++static int tg3_test_link(struct tg3 *tp) ++{ ++ int i, max; ++ ++ if (!netif_running(tp->dev)) ++ return -ENODEV; ++ ++ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) ++ max = TG3_SERDES_TIMEOUT_SEC; ++ else ++ max = TG3_COPPER_TIMEOUT_SEC; ++ ++ for (i = 0; i < max; i++) { ++ if (tp->link_up) ++ return 0; ++ ++ if (msleep_interruptible(1000)) ++ break; ++ } ++ ++ return -EIO; ++} ++ ++/* Only test the commonly used registers */ ++static int tg3_test_registers(struct tg3 *tp) ++{ ++ int i, is_5705, is_5750; ++ u32 offset, read_mask, write_mask, val, save_val, read_val; ++ static struct { ++ u16 offset; ++ u16 flags; ++#define TG3_FL_5705 0x1 ++#define TG3_FL_NOT_5705 0x2 ++#define TG3_FL_NOT_5788 0x4 ++#define TG3_FL_NOT_5750 0x8 ++ u32 read_mask; ++ u32 write_mask; ++ } reg_tbl[] = { ++ /* MAC Control Registers */ ++ { MAC_MODE, TG3_FL_NOT_5705, ++ 0x00000000, 0x00ef6f8c }, ++ { MAC_MODE, TG3_FL_5705, ++ 0x00000000, 0x01ef6b8c }, ++ { MAC_STATUS, TG3_FL_NOT_5705, ++ 0x03800107, 0x00000000 }, ++ { MAC_STATUS, TG3_FL_5705, ++ 0x03800100, 0x00000000 }, ++ { MAC_ADDR_0_HIGH, 0x0000, ++ 
0x00000000, 0x0000ffff }, ++ { MAC_ADDR_0_LOW, 0x0000, ++ 0x00000000, 0xffffffff }, ++ { MAC_RX_MTU_SIZE, 0x0000, ++ 0x00000000, 0x0000ffff }, ++ { MAC_TX_MODE, 0x0000, ++ 0x00000000, 0x00000070 }, ++ { MAC_TX_LENGTHS, 0x0000, ++ 0x00000000, 0x00003fff }, ++ { MAC_RX_MODE, TG3_FL_NOT_5705, ++ 0x00000000, 0x000007fc }, ++ { MAC_RX_MODE, TG3_FL_5705, ++ 0x00000000, 0x000007dc }, ++ { MAC_HASH_REG_0, 0x0000, ++ 0x00000000, 0xffffffff }, ++ { MAC_HASH_REG_1, 0x0000, ++ 0x00000000, 0xffffffff }, ++ { MAC_HASH_REG_2, 0x0000, ++ 0x00000000, 0xffffffff }, ++ { MAC_HASH_REG_3, 0x0000, ++ 0x00000000, 0xffffffff }, ++ ++ /* Receive Data and Receive BD Initiator Control Registers. */ ++ { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, ++ 0x00000000, 0x00000003 }, ++ { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { RCVDBDI_STD_BD+0, 0x0000, ++ 0x00000000, 0xffffffff }, ++ { RCVDBDI_STD_BD+4, 0x0000, ++ 0x00000000, 0xffffffff }, ++ { RCVDBDI_STD_BD+8, 0x0000, ++ 0x00000000, 0xffff0002 }, ++ { RCVDBDI_STD_BD+0xc, 0x0000, ++ 0x00000000, 0xffffffff }, ++ ++ /* Receive BD Initiator Control Registers. */ ++ { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { RCVBDI_STD_THRESH, TG3_FL_5705, ++ 0x00000000, 0x000003ff }, ++ { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ ++ /* Host Coalescing Control Registers. */ ++ { HOSTCC_MODE, TG3_FL_NOT_5705, ++ 0x00000000, 0x00000004 }, ++ { HOSTCC_MODE, TG3_FL_5705, ++ 0x00000000, 0x000000f6 }, ++ { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { HOSTCC_RXCOL_TICKS, TG3_FL_5705, ++ 0x00000000, 0x000003ff }, ++ { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { HOSTCC_TXCOL_TICKS, TG3_FL_5705, ++ 0x00000000, 0x000003ff }, ++ { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, ++ 0x00000000, 0x000000ff }, ++ { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, ++ 0x00000000, 0x000000ff }, ++ { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, ++ 0x00000000, 0x000000ff }, ++ { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, ++ 0x00000000, 0x000000ff }, ++ { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, ++ 0x00000000, 0xffffffff }, ++ { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, ++ 0x00000000, 0xffffffff }, ++ { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, ++ 0x00000000, 0xffffffff }, ++ { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, ++ 0xffffffff, 0x00000000 }, ++ { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, ++ 0xffffffff, 0x00000000 }, ++ ++ /* Buffer Manager Control Registers. 
*/ ++ { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, ++ 0x00000000, 0x007fff80 }, ++ { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, ++ 0x00000000, 0x007fffff }, ++ { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, ++ 0x00000000, 0x0000003f }, ++ { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, ++ 0x00000000, 0x000001ff }, ++ { BUFMGR_MB_HIGH_WATER, 0x0000, ++ 0x00000000, 0x000001ff }, ++ { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, ++ 0xffffffff, 0x00000000 }, ++ { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, ++ 0xffffffff, 0x00000000 }, ++ ++ /* Mailbox Registers */ ++ { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, ++ 0x00000000, 0x000001ff }, ++ { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, ++ 0x00000000, 0x000001ff }, ++ { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, ++ 0x00000000, 0x000007ff }, ++ { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, ++ 0x00000000, 0x000001ff }, ++ ++ { 0xffff, 0x0000, 0x00000000, 0x00000000 }, ++ }; ++ ++ is_5705 = is_5750 = 0; ++ if (tg3_flag(tp, 5705_PLUS)) { ++ is_5705 = 1; ++ if (tg3_flag(tp, 5750_PLUS)) ++ is_5750 = 1; ++ } ++ ++ for (i = 0; reg_tbl[i].offset != 0xffff; i++) { ++ if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) ++ continue; ++ ++ if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) ++ continue; ++ ++ if (tg3_flag(tp, IS_5788) && ++ (reg_tbl[i].flags & TG3_FL_NOT_5788)) ++ continue; ++ ++ if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) ++ continue; ++ ++ offset = (u32) reg_tbl[i].offset; ++ read_mask = reg_tbl[i].read_mask; ++ write_mask = reg_tbl[i].write_mask; ++ ++ /* Save the original register content */ ++ save_val = tr32(offset); ++ ++ /* Determine the read-only value. */ ++ read_val = save_val & read_mask; ++ ++ /* Write zero to the register, then make sure the read-only bits ++ * are not changed and the read/write bits are all zeros. ++ */ ++ tw32(offset, 0); ++ ++ val = tr32(offset); ++ ++ /* Test the read-only and read/write bits. */ ++ if (((val & read_mask) != read_val) || (val & write_mask)) ++ goto out; ++ ++ /* Write ones to all the bits defined by RdMask and WrMask, then ++ * make sure the read-only bits are not changed and the ++ * read/write bits are all ones. ++ */ ++ tw32(offset, read_mask | write_mask); ++ ++ val = tr32(offset); ++ ++ /* Test the read-only bits. */ ++ if ((val & read_mask) != read_val) ++ goto out; ++ ++ /* Test the read/write bits. 
*/ ++ if ((val & write_mask) != write_mask) ++ goto out; ++ ++ tw32(offset, save_val); ++ } ++ ++ return 0; ++ ++out: ++ if (netif_msg_hw(tp)) ++ netdev_err(tp->dev, ++ "Register test failed at offset %x\n", offset); ++ tw32(offset, save_val); ++ return -EIO; ++} ++ ++static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) ++{ ++ static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; ++ int i; ++ u32 j; ++ ++ for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { ++ for (j = 0; j < len; j += 4) { ++ u32 val; ++ ++ tg3_write_mem(tp, offset + j, test_pattern[i]); ++ tg3_read_mem(tp, offset + j, &val); ++ if (val != test_pattern[i]) ++ return -EIO; ++ } ++ } ++ return 0; ++} ++ ++static int tg3_test_memory(struct tg3 *tp) ++{ ++ static struct mem_entry { ++ u32 offset; ++ u32 len; ++ } mem_tbl_570x[] = { ++ { 0x00000000, 0x00b50}, ++ { 0x00002000, 0x1c000}, ++ { 0xffffffff, 0x00000} ++ }, mem_tbl_5705[] = { ++ { 0x00000100, 0x0000c}, ++ { 0x00000200, 0x00008}, ++ { 0x00004000, 0x00800}, ++ { 0x00006000, 0x01000}, ++ { 0x00008000, 0x02000}, ++ { 0x00010000, 0x0e000}, ++ { 0xffffffff, 0x00000} ++ }, mem_tbl_5755[] = { ++ { 0x00000200, 0x00008}, ++ { 0x00004000, 0x00800}, ++ { 0x00006000, 0x00800}, ++ { 0x00008000, 0x02000}, ++ { 0x00010000, 0x0c000}, ++ { 0xffffffff, 0x00000} ++ }, mem_tbl_5906[] = { ++ { 0x00000200, 0x00008}, ++ { 0x00004000, 0x00400}, ++ { 0x00006000, 0x00400}, ++ { 0x00008000, 0x01000}, ++ { 0x00010000, 0x01000}, ++ { 0xffffffff, 0x00000} ++ }, mem_tbl_5717[] = { ++ { 0x00000200, 0x00008}, ++ { 0x00010000, 0x0a000}, ++ { 0x00020000, 0x13c00}, ++ { 0xffffffff, 0x00000} ++ }, mem_tbl_57765[] = { ++ { 0x00000200, 0x00008}, ++ { 0x00004000, 0x00800}, ++ { 0x00006000, 0x09800}, ++ { 0x00010000, 0x0a000}, ++ { 0xffffffff, 0x00000} ++ }; ++ struct mem_entry *mem_tbl; ++ int err = 0; ++ int i; ++ ++ if (tg3_flag(tp, 5717_PLUS)) ++ mem_tbl = mem_tbl_5717; ++ else if (tg3_flag(tp, 57765_CLASS) || ++ tg3_asic_rev(tp) == ASIC_REV_5762) ++ mem_tbl = mem_tbl_57765; ++ else if (tg3_flag(tp, 5755_PLUS)) ++ mem_tbl = mem_tbl_5755; ++ else if (tg3_asic_rev(tp) == ASIC_REV_5906) ++ mem_tbl = mem_tbl_5906; ++ else if (tg3_flag(tp, 5705_PLUS)) ++ mem_tbl = mem_tbl_5705; ++ else ++ mem_tbl = mem_tbl_570x; ++ ++ for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { ++ err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); ++ if (err) ++ break; ++ } ++ ++ return err; ++} ++ ++#define TG3_TSO_MSS 500 ++ ++#define TG3_TSO_IP_HDR_LEN 20 ++#define TG3_TSO_TCP_HDR_LEN 20 ++#define TG3_TSO_TCP_OPT_LEN 12 ++ ++static const u8 tg3_tso_header[] = { ++0x08, 0x00, ++0x45, 0x00, 0x00, 0x00, ++0x00, 0x00, 0x40, 0x00, ++0x40, 0x06, 0x00, 0x00, ++0x0a, 0x00, 0x00, 0x01, ++0x0a, 0x00, 0x00, 0x02, ++0x0d, 0x00, 0xe0, 0x00, ++0x00, 0x00, 0x01, 0x00, ++0x00, 0x00, 0x02, 0x00, ++0x80, 0x10, 0x10, 0x00, ++0x14, 0x09, 0x00, 0x00, ++0x01, 0x01, 0x08, 0x0a, ++0x11, 0x11, 0x11, 0x11, ++0x11, 0x11, 0x11, 0x11, ++}; ++ ++static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) ++{ ++ u32 rx_start_idx, rx_idx, tx_idx, opaque_key; ++ u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val; ++ u32 budget; ++ struct sk_buff *skb; ++#ifndef BCM_HAS_BUILD_SKB ++ struct sk_buff *rx_skb; ++#endif ++ u8 *tx_data, *rx_data; ++ dma_addr_t map; ++ int num_pkts, tx_len, rx_len, i, err; ++ struct tg3_rx_buffer_desc *desc; ++ struct tg3_napi *tnapi, *rnapi; ++ struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; ++ ++ tnapi = &tp->napi[0]; ++ rnapi = &tp->napi[0]; ++ if (tg3_flag(tp, 
ENABLE_RSS)) ++ rnapi = &tp->napi[1]; ++ if (tg3_flag(tp, ENABLE_TSS)) ++ tnapi = &tp->napi[1]; ++ coal_now = tnapi->coal_now | rnapi->coal_now; ++ ++ err = -EIO; ++ ++ tx_len = pktsz; ++ skb = netdev_alloc_skb(tp->dev, tx_len); ++ if (!skb) ++ return -ENOMEM; ++ ++ tx_data = skb_put(skb, tx_len); ++ memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); ++ memset(tx_data + ETH_ALEN, 0x0, 8); ++ ++ tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); ++ ++#if TG3_TSO_SUPPORT != 0 ++ if (tso_loopback) { ++ struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; ++ ++ u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + ++ TG3_TSO_TCP_OPT_LEN; ++ ++ memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, ++ sizeof(tg3_tso_header)); ++ mss = TG3_TSO_MSS; ++ ++ val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); ++ num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); ++ ++ /* Set the total length field in the IP header */ ++ iph->tot_len = htons((u16)(mss + hdr_len)); ++ ++ base_flags = (TXD_FLAG_CPU_PRE_DMA | ++ TXD_FLAG_CPU_POST_DMA); ++ ++ if (tg3_flag(tp, HW_TSO_1) || ++ tg3_flag(tp, HW_TSO_2) || ++ tg3_flag(tp, HW_TSO_3)) { ++ struct tcphdr *th; ++ val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; ++ th = (struct tcphdr *)&tx_data[val]; ++ th->check = 0; ++ } else ++ base_flags |= TXD_FLAG_TCPUDP_CSUM; ++ ++ if (tg3_flag(tp, HW_TSO_3)) { ++ mss |= (hdr_len & 0xc) << 12; ++ if (hdr_len & 0x10) ++ base_flags |= 0x00000010; ++ base_flags |= (hdr_len & 0x3e0) << 5; ++ } else if (tg3_flag(tp, HW_TSO_2)) ++ mss |= hdr_len << 9; ++ else if (tg3_flag(tp, HW_TSO_1) || ++ tg3_asic_rev(tp) == ASIC_REV_5705) { ++ mss |= (TG3_TSO_TCP_OPT_LEN << 9); ++ } else { ++ base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); ++ } ++ ++ data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); ++ } else ++#endif ++ { ++ num_pkts = 1; ++ data_off = ETH_HLEN; ++ ++ if (tg3_flag(tp, USE_JUMBO_BDFLAG) && ++ tx_len > VLAN_ETH_FRAME_LEN) ++ base_flags |= TXD_FLAG_JMB_PKT; ++ } ++ ++ for (i = data_off; i < tx_len; i++) ++ tx_data[i] = (u8) (i & 0xff); ++ ++ map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); ++ if (pci_dma_mapping_error_(tp->pdev, map)) { ++ dev_kfree_skb(skb); ++ return -EIO; ++ } ++ ++ val = tnapi->tx_prod; ++ tnapi->tx_buffers[val].skb = skb; ++ dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); ++ ++ tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | ++ rnapi->coal_now); ++ ++ udelay(10); ++ ++ rx_start_idx = rnapi->hw_status->idx[0].rx_producer; ++ ++ budget = tg3_tx_avail(tnapi); ++ if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, ++ base_flags | TXD_FLAG_END, mss, 0)) { ++ tnapi->tx_buffers[val].skb = NULL; ++ dev_kfree_skb(skb); ++ return -EIO; ++ } ++ ++ tnapi->tx_prod++; ++ ++ /* Sync BD data before updating mailbox */ ++ wmb(); ++ ++ tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); ++ tr32_mailbox(tnapi->prodmbox); ++ ++ udelay(10); ++ ++ /* 350 usec to allow enough time on some 10/100 Mbps devices. 
*/ ++ for (i = 0; i < 35; i++) { ++ tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | ++ coal_now); ++ ++ udelay(10); ++ ++ tx_idx = tnapi->hw_status->idx[0].tx_consumer; ++ rx_idx = rnapi->hw_status->idx[0].rx_producer; ++ if ((tx_idx == tnapi->tx_prod) && ++ (rx_idx == (rx_start_idx + num_pkts))) ++ break; ++ } ++ ++ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); ++ dev_kfree_skb(skb); ++ ++ if (tx_idx != tnapi->tx_prod) ++ goto out; ++ ++ if (rx_idx != rx_start_idx + num_pkts) ++ goto out; ++ ++ val = data_off; ++ while (rx_idx != rx_start_idx) { ++ desc = &rnapi->rx_rcb[rx_start_idx++]; ++ desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; ++ opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; ++ ++ if ((desc->err_vlan & RXD_ERR_MASK) != 0 && ++ (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) ++ goto out; ++ ++ rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) ++ - ETH_FCS_LEN; ++ ++ if (!tso_loopback) { ++ if (rx_len != tx_len) ++ goto out; ++ ++ if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { ++ if (opaque_key != RXD_OPAQUE_RING_STD) ++ goto out; ++ } else { ++ if (opaque_key != RXD_OPAQUE_RING_JUMBO) ++ goto out; ++ } ++ } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && ++ (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) ++ >> RXD_TCPCSUM_SHIFT != 0xffff) { ++ goto out; ++ } ++ ++ if (opaque_key == RXD_OPAQUE_RING_STD) { ++#ifdef BCM_HAS_BUILD_SKB ++ rx_data = tpr->rx_std_buffers[desc_idx].data; ++#else ++ rx_skb = tpr->rx_std_buffers[desc_idx].data; ++ rx_data = rx_skb->data; ++#endif ++ map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], ++ mapping); ++ } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { ++#ifdef BCM_HAS_BUILD_SKB ++ rx_data = tpr->rx_jmb_buffers[desc_idx].data; ++#else ++ rx_skb = tpr->rx_jmb_buffers[desc_idx].data; ++ rx_data = rx_skb->data; ++#endif ++ map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], ++ mapping); ++ } else ++ goto out; ++ ++ pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, ++ PCI_DMA_FROMDEVICE); ++ ++ for (i = data_off; i < rx_len; i++, val++) { ++ if (*(rx_data + TG3_RX_OFFSET(tp) + i) != (u8) (val & 0xff)) ++ goto out; ++ } ++ } ++ ++ err = 0; ++ ++ /* tg3_free_rings will unmap and free the rx_data */ ++out: ++ return err; ++} ++ ++#define TG3_STD_LOOPBACK_FAILED 1 ++#define TG3_JMB_LOOPBACK_FAILED 2 ++#define TG3_TSO_LOOPBACK_FAILED 4 ++#define TG3_LOOPBACK_FAILED \ ++ (TG3_STD_LOOPBACK_FAILED | \ ++ TG3_JMB_LOOPBACK_FAILED | \ ++ TG3_TSO_LOOPBACK_FAILED) ++ ++static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) ++{ ++ int err = -EIO; ++ u32 eee_cap; ++ u32 jmb_pkt_sz = 9000; ++ ++ if (tp->dma_limit) ++ jmb_pkt_sz = tp->dma_limit - ETH_HLEN; ++ ++ eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; ++ tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; ++ ++ if (!netif_running(tp->dev)) { ++ data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; ++ data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; ++ if (do_extlpbk) ++ data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; ++ goto done; ++ } ++ ++ err = tg3_reset_hw(tp, true); ++ if (err) { ++ data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; ++ data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; ++ if (do_extlpbk) ++ data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; ++ goto done; ++ } ++ ++ if (tg3_flag(tp, ENABLE_RSS)) { ++ int i; ++ ++ /* Reroute all rx packets to the 1st queue */ ++ for (i = MAC_RSS_INDIR_TBL_0; ++ i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) ++ tw32(i, 0x0); ++ } ++ ++ /* HW errata - mac loopback fails in some cases on 5780. 
++ * Normal traffic and PHY loopback are not affected by ++ * errata. Also, the MAC loopback test is deprecated for ++ * all newer ASIC revisions. ++ */ ++ if (tg3_asic_rev(tp) != ASIC_REV_5780 && ++ !tg3_flag(tp, CPMU_PRESENT)) { ++ tg3_mac_loopback(tp, true); ++ ++ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) ++ data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; ++ ++ if (tg3_flag(tp, JUMBO_RING_ENABLE) && ++ tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) ++ data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; ++ ++ tg3_mac_loopback(tp, false); ++ } ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && ++ !tg3_flag(tp, USE_PHYLIB)) { ++ int i; ++ ++ tg3_phy_lpbk_set(tp, 0, false); ++ ++ /* Wait for link */ ++ for (i = 0; i < 700; i++) { ++ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) ++ break; ++ mdelay(1); ++ } ++ ++ if (i == 700) { ++ netdev_info(tp->dev, "No link for loopback test!\n" ); ++ data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; ++ return -EIO; ++ } ++ ++ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) ++ data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; ++#if TG3_TSO_SUPPORT != 0 ++ if (tg3_flag(tp, TSO_CAPABLE) && ++ tg3_run_loopback(tp, ETH_FRAME_LEN, true)) ++ data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED; ++#endif ++ if (tg3_flag(tp, JUMBO_RING_ENABLE) && ++ tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) ++ data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; ++ ++ if (do_extlpbk) { ++ tg3_phy_lpbk_set(tp, 0, true); ++ ++ /* All link indications report up, but the hardware ++ * isn't really ready for about 20 msec. Double it ++ * to be sure. ++ */ ++ mdelay(40); ++ ++ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) ++ data[TG3_EXT_LOOPB_TEST] |= ++ TG3_STD_LOOPBACK_FAILED; ++ if (tg3_flag(tp, TSO_CAPABLE) && ++ tg3_run_loopback(tp, ETH_FRAME_LEN, true)) ++ data[TG3_EXT_LOOPB_TEST] |= ++ TG3_TSO_LOOPBACK_FAILED; ++ if (tg3_flag(tp, JUMBO_RING_ENABLE) && ++ tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) ++ data[TG3_EXT_LOOPB_TEST] |= ++ TG3_JMB_LOOPBACK_FAILED; ++ } ++ ++ /* Re-enable gphy autopowerdown. */ ++ if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) ++ tg3_phy_toggle_apd(tp, true); ++ } ++ ++ err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] | ++ data[TG3_EXT_LOOPB_TEST]) ? 
-EIO : 0; ++ ++done: ++ tp->phy_flags |= eee_cap; ++ ++ return err; ++} ++ ++static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, ++ u64 *data) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; ++ ++ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { ++ if (tg3_power_up(tp)) { ++ etest->flags |= ETH_TEST_FL_FAILED; ++ memset(data, 1, sizeof(u64) * TG3_NUM_TEST); ++ return; ++ } ++ tg3_ape_driver_state_change(tp, RESET_KIND_INIT); ++ } ++ ++ memset(data, 0, sizeof(u64) * TG3_NUM_TEST); ++ ++ if (tg3_test_nvram(tp) != 0) { ++ etest->flags |= ETH_TEST_FL_FAILED; ++ data[TG3_NVRAM_TEST] = 1; ++ } ++ if (!doextlpbk && tg3_test_link(tp)) { ++ etest->flags |= ETH_TEST_FL_FAILED; ++ data[TG3_LINK_TEST] = 1; ++ } ++ if (etest->flags & ETH_TEST_FL_OFFLINE) { ++ int err, err2 = 0, irq_sync = 0; ++ ++ if (netif_running(dev)) { ++ tg3_phy_stop(tp); ++ tg3_netif_stop(tp); ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ tg3_netq_invalidate_state(tp); ++#endif ++ irq_sync = 1; ++ } ++ ++ tg3_full_lock(tp, irq_sync); ++ tg3_halt(tp, RESET_KIND_SUSPEND, 1); ++ err = tg3_nvram_lock(tp); ++ tg3_halt_cpu(tp, RX_CPU_BASE); ++ if (!tg3_flag(tp, 5705_PLUS)) ++ tg3_halt_cpu(tp, TX_CPU_BASE); ++ if (!err) ++ tg3_nvram_unlock(tp); ++ ++ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) ++ tg3_phy_reset(tp); ++ ++ if (tg3_test_registers(tp) != 0) { ++ etest->flags |= ETH_TEST_FL_FAILED; ++ data[TG3_REGISTER_TEST] = 1; ++ } ++ ++ if (tg3_test_memory(tp) != 0) { ++ etest->flags |= ETH_TEST_FL_FAILED; ++ data[TG3_MEMORY_TEST] = 1; ++ } ++ ++ if (doextlpbk) ++ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; ++ ++ if (tg3_test_loopback(tp, data, doextlpbk)) ++ etest->flags |= ETH_TEST_FL_FAILED; ++ ++ tg3_full_unlock(tp); ++ ++ if (tg3_test_interrupt(tp) != 0) { ++ etest->flags |= ETH_TEST_FL_FAILED; ++ data[TG3_INTERRUPT_TEST] = 1; ++ } ++ ++ tg3_full_lock(tp, 0); ++ ++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); ++ if (netif_running(dev)) { ++ tg3_flag_set(tp, INIT_COMPLETE); ++ err2 = tg3_restart_hw(tp, true); ++ if (!err2) ++ tg3_netif_start(tp); ++ } ++ ++ tg3_full_unlock(tp); ++ ++ if (irq_sync && !err2) ++ tg3_phy_start(tp); ++ } ++ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ++ tg3_power_down_prepare(tp); ++ ++} ++ ++#ifdef BCM_HAS_IEEE1588_SUPPORT ++static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ struct hwtstamp_config stmpconf; ++ ++ if (!tg3_flag(tp, PTP_CAPABLE)) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) ++ return -EFAULT; ++ ++ if (stmpconf.flags) ++ return -EINVAL; ++ ++ if (stmpconf.tx_type != HWTSTAMP_TX_ON && ++ stmpconf.tx_type != HWTSTAMP_TX_OFF) ++ return -ERANGE; ++ ++ switch (stmpconf.rx_filter) { ++ case HWTSTAMP_FILTER_NONE: ++ tp->rxptpctl = 0; ++ break; ++ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: ++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | ++ TG3_RX_PTP_CTL_ALL_V1_EVENTS; ++ break; ++ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: ++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | ++ TG3_RX_PTP_CTL_SYNC_EVNT; ++ break; ++ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: ++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | ++ TG3_RX_PTP_CTL_DELAY_REQ; ++ break; ++ case HWTSTAMP_FILTER_PTP_V2_EVENT: ++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | ++ TG3_RX_PTP_CTL_ALL_V2_EVENTS; ++ break; ++ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: ++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | ++ TG3_RX_PTP_CTL_ALL_V2_EVENTS; ++ break; ++ case 
HWTSTAMP_FILTER_PTP_V2_L4_EVENT: ++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | ++ TG3_RX_PTP_CTL_ALL_V2_EVENTS; ++ break; ++ case HWTSTAMP_FILTER_PTP_V2_SYNC: ++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | ++ TG3_RX_PTP_CTL_SYNC_EVNT; ++ break; ++ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: ++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | ++ TG3_RX_PTP_CTL_SYNC_EVNT; ++ break; ++ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: ++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | ++ TG3_RX_PTP_CTL_SYNC_EVNT; ++ break; ++ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: ++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | ++ TG3_RX_PTP_CTL_DELAY_REQ; ++ break; ++ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: ++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | ++ TG3_RX_PTP_CTL_DELAY_REQ; ++ break; ++ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: ++ tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | ++ TG3_RX_PTP_CTL_DELAY_REQ; ++ break; ++ default: ++ return -ERANGE; ++ } ++ ++ if (netif_running(dev) && tp->rxptpctl) ++ tw32(TG3_RX_PTP_CTL, ++ tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); ++ ++ if (stmpconf.tx_type == HWTSTAMP_TX_ON) ++ tg3_flag_set(tp, TX_TSTAMP_EN); ++ else ++ tg3_flag_clear(tp, TX_TSTAMP_EN); ++ ++ return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? ++ -EFAULT : 0; ++} ++ ++static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ struct hwtstamp_config stmpconf; ++ ++ if (!tg3_flag(tp, PTP_CAPABLE)) ++ return -EOPNOTSUPP; ++ ++ stmpconf.flags = 0; ++ stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ? ++ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF); ++ ++ switch (tp->rxptpctl) { ++ case 0: ++ stmpconf.rx_filter = HWTSTAMP_FILTER_NONE; ++ break; ++ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS: ++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; ++ break; ++ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT: ++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; ++ break; ++ case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ: ++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; ++ break; ++ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: ++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ++ break; ++ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: ++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; ++ break; ++ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: ++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; ++ break; ++ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: ++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; ++ break; ++ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: ++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC; ++ break; ++ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT: ++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; ++ break; ++ case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ: ++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; ++ break; ++ case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ: ++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; ++ break; ++ case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ: ++ stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; ++ break; ++ default: ++ WARN_ON_ONCE(1); ++ return -ERANGE; ++ } ++ ++ return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
++ -EFAULT : 0; ++} ++#endif /* BCM_HAS_IEEE1588_SUPPORT */ ++ ++static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) ++{ ++#if (LINUX_VERSION_CODE >= 0x020607) ++ struct mii_ioctl_data *data = if_mii(ifr); ++#else ++ struct mii_ioctl_data *data = (struct mii_ioctl_data *) &ifr->ifr_ifru; ++#endif ++ struct tg3 *tp = netdev_priv(dev); ++ int err; ++ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_flag(tp, USE_PHYLIB)) { ++ struct phy_device *phydev; ++ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) ++ return -EAGAIN; ++ phydev = tp->mdio_bus->phy_map[tp->phy_addr]; ++ return phy_mii_ioctl(phydev, ifr, cmd); ++ } ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++ ++ switch (cmd) { ++ case SIOCGMIIPHY: ++ data->phy_id = tp->phy_addr; ++ ++ /* fallthru */ ++ case SIOCGMIIREG: { ++ u32 mii_regval; ++ ++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ++ break; /* We have no PHY */ ++ ++ if (!netif_running(dev)) ++ return -EAGAIN; ++ ++ spin_lock_bh(&tp->lock); ++ err = __tg3_readphy(tp, data->phy_id & 0x1f, ++ data->reg_num & 0x1f, &mii_regval); ++ spin_unlock_bh(&tp->lock); ++ ++ data->val_out = mii_regval; ++ ++ return err; ++ } ++ ++ case SIOCSMIIREG: ++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ++ break; /* We have no PHY */ ++ ++ if (!netif_running(dev)) ++ return -EAGAIN; ++ ++ spin_lock_bh(&tp->lock); ++ err = __tg3_writephy(tp, data->phy_id & 0x1f, ++ data->reg_num & 0x1f, data->val_in); ++ spin_unlock_bh(&tp->lock); ++ ++ return err; ++ ++#if defined(__VMKLNX__) && !defined(TG3_VMWARE_BMAPILNX_DISABLE) ++ case BRCM_VMWARE_CIM_IOCTL: ++ return tg3_vmware_ioctl_cim(dev, ifr); ++#endif /* TG3_VMWARE_BMAPILNX */ ++ ++#ifdef BCM_HAS_IEEE1588_SUPPORT ++ case SIOCSHWTSTAMP: ++ return tg3_hwtstamp_set(dev, ifr); ++ ++ case SIOCGHWTSTAMP: ++ return tg3_hwtstamp_get(dev, ifr); ++#endif /* BCM_HAS_IEEE1588_SUPPORT */ ++ ++ default: ++ /* do nothing */ ++ break; ++ } ++ return -EOPNOTSUPP; ++} ++ ++static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ memcpy(ec, &tp->coal, sizeof(*ec)); ++ return 0; ++} ++ ++static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; ++ u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; ++ ++ if (!tg3_flag(tp, 5705_PLUS)) { ++ max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; ++ max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; ++ max_stat_coal_ticks = MAX_STAT_COAL_TICKS; ++ min_stat_coal_ticks = MIN_STAT_COAL_TICKS; ++ } ++ ++ if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || ++ (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || ++ (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || ++ (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || ++ (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || ++ (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || ++ (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || ++ (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || ++ (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || ++ (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) ++ return -EINVAL; ++ ++ /* No rx interrupts will be generated if both are zero */ ++ if ((ec->rx_coalesce_usecs == 0) && ++ (ec->rx_max_coalesced_frames == 0)) ++ return -EINVAL; ++ ++ /* No tx interrupts will be generated if both are zero */ ++ if ((ec->tx_coalesce_usecs == 0) && ++ (ec->tx_max_coalesced_frames == 0)) ++ return -EINVAL; ++ ++ /* Only copy relevant parameters, ignore all others. 
*/ ++ tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; ++ tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; ++ tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; ++ tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; ++ tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; ++ tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; ++ tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; ++ tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; ++ tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; ++ ++ if (netif_running(dev)) { ++ tg3_full_lock(tp, 0); ++ __tg3_set_coalesce(tp, &tp->coal); ++ tg3_full_unlock(tp); ++ } ++ return 0; ++} ++ ++static u32 tg3_get_link(struct net_device *dev) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ if (!netif_running(tp->dev)) ++ return 0; ++ ++ if (tg3_flag(tp, POLL_CPMU_LINK)) { ++ u32 cpmu = tr32(TG3_CPMU_STATUS); ++ return !((cpmu & TG3_CPMU_STATUS_LINK_MASK) == ++ TG3_CPMU_STATUS_LINK_MASK); ++ } ++ ++ return tp->link_up; ++} ++ ++#if defined(ETHTOOL_GEEE) ++static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { ++ netdev_warn(tp->dev, "Board does not support EEE!\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (edata->advertised != tp->eee.advertised) { ++ netdev_warn(tp->dev, ++ "Direct manipulation of EEE advertisement is not supported\n"); ++ return -EINVAL; ++ } ++ ++ if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { ++ netdev_warn(tp->dev, ++ "Maximal Tx Lpi timer supported is %#x(u)\n", ++ TG3_CPMU_DBTMR1_LNKIDLE_MAX); ++ return -EINVAL; ++ } ++ ++ tp->eee = *edata; ++ ++ tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; ++ tg3_warn_mgmt_link_flap(tp); ++ ++ if (netif_running(tp->dev)) { ++ tg3_full_lock(tp, 0); ++ tg3_setup_eee(tp); ++ tg3_phy_reset(tp); ++ tg3_full_unlock(tp); ++ } ++ ++ return 0; ++} ++ ++static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { ++ netdev_warn(tp->dev, ++ "Board does not support EEE!\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ *edata = tp->eee; ++ return 0; ++} ++#endif ++ ++static struct ethtool_ops tg3_ethtool_ops = { ++ .get_settings = tg3_get_settings, ++ .set_settings = tg3_set_settings, ++ .get_drvinfo = tg3_get_drvinfo, ++ .get_regs_len = tg3_get_regs_len, ++ .get_regs = tg3_get_regs, ++ .get_wol = tg3_get_wol, ++ .set_wol = tg3_set_wol, ++ .get_msglevel = tg3_get_msglevel, ++ .set_msglevel = tg3_set_msglevel, ++ .nway_reset = tg3_nway_reset, ++ .get_link = tg3_get_link, ++#if (LINUX_VERSION_CODE >= 0x20418) ++ .get_eeprom_len = tg3_get_eeprom_len, ++#endif ++#ifdef ETHTOOL_GEEPROM ++ .get_eeprom = tg3_get_eeprom, ++#endif ++#ifdef ETHTOOL_SEEPROM ++ .set_eeprom = tg3_set_eeprom, ++#endif ++ .get_ringparam = tg3_get_ringparam, ++ .set_ringparam = tg3_set_ringparam, ++ .get_pauseparam = tg3_get_pauseparam, ++ .set_pauseparam = tg3_set_pauseparam, ++ .self_test = tg3_self_test, ++ .get_strings = tg3_get_strings, ++#if defined(BCM_HAS_SET_PHYS_ID) && !defined(GET_ETHTOOL_OP_EXT) ++ .set_phys_id = tg3_set_phys_id, ++#endif ++ .get_ethtool_stats = tg3_get_ethtool_stats, ++ .get_coalesce = tg3_get_coalesce, ++ .set_coalesce = tg3_set_coalesce, ++#if (LINUX_VERSION_CODE >= 0x20618) || defined (__VMKLNX__) ++ .get_sset_count = tg3_get_sset_count, ++#endif ++#if defined(BCM_HAS_GET_RXNFC) && 
!defined(GET_ETHTOOL_OP_EXT) ++ .get_rxnfc = tg3_get_rxnfc, ++#endif /* BCM_HAS_GET_RXNFC */ ++#if defined(BCM_HAS_GET_RXFH_INDIR) && !defined(GET_ETHTOOL_OP_EXT) ++#ifdef BCM_HAS_GET_RXFH_INDIR_SIZE ++ .get_rxfh_indir_size = tg3_get_rxfh_indir_size, ++#endif /* BCM_HAS_GET_RXFH_INDIR_SIZE */ ++#ifdef BCM_HAS_OLD_RXFH_INDIR ++ .get_rxfh_indir = tg3_get_rxfh_indir, ++ .set_rxfh_indir = tg3_set_rxfh_indir, ++#else ++ .get_rxfh = tg3_get_rxfh, ++ .set_rxfh = tg3_set_rxfh, ++#endif ++#endif /* BCM_HAS_GET_RXFH_INDIR */ ++#if defined(ETHTOOL_GCHANNELS) && !defined(GET_ETHTOOL_OP_EXT) ++ .get_channels = tg3_get_channels, ++ .set_channels = tg3_set_channels, ++#endif ++ ++#ifndef BCM_HAS_NETDEV_UPDATE_FEATURES ++ .get_rx_csum = tg3_get_rx_csum, ++ .set_rx_csum = tg3_set_rx_csum, ++ .get_tx_csum = ethtool_op_get_tx_csum, ++#ifdef BCM_HAS_SET_TX_CSUM ++ .set_tx_csum = tg3_set_tx_csum, ++#endif ++#if TG3_TSO_SUPPORT != 0 ++ .get_tso = ethtool_op_get_tso, ++ .set_tso = tg3_set_tso, ++#endif ++#endif /* BCM_HAS_NETDEV_UPDATE_FEATURES */ ++#ifdef ETHTOOL_GSG ++#if defined(BCM_HAS_ETHTOOL_OP_SET_SG) && !defined(BCM_HAS_FIX_FEATURES) ++ .get_sg = ethtool_op_get_sg, ++ .set_sg = ethtool_op_set_sg, ++#endif ++#endif ++#if (LINUX_VERSION_CODE < 0x20618) ++ .self_test_count = tg3_get_test_count, ++#endif ++#if !defined(BCM_HAS_SET_PHYS_ID) || defined(GET_ETHTOOL_OP_EXT) ++ .phys_id = tg3_phys_id, ++#endif ++#if (LINUX_VERSION_CODE < 0x20618) ++ .get_stats_count = tg3_get_stats_count, ++#endif ++#if defined(ETHTOOL_GPERMADDR) && (LINUX_VERSION_CODE < 0x020617) ++ .get_perm_addr = ethtool_op_get_perm_addr, ++#endif ++#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) && defined(ETHTOOL_GET_TS_INFO) && !defined(GET_ETHTOOL_OP_EXT) ++ .get_ts_info = tg3_get_ts_info, ++#endif ++#if defined(ETHTOOL_GEEE) && !defined(GET_ETHTOOL_OP_EXT) ++ .get_eee = tg3_get_eee, ++ .set_eee = tg3_set_eee, ++#endif ++}; ++ ++#ifdef GET_ETHTOOL_OP_EXT ++static const struct ethtool_ops_ext tg3_ethtool_ops_ext = { ++ .size = sizeof(struct ethtool_ops_ext), ++ .get_ts_info = tg3_get_ts_info, ++#ifdef ETHTOOL_GEEE ++ .get_eee = tg3_get_eee, ++ .set_eee = tg3_set_eee, ++#endif ++ .get_channels = tg3_get_channels, ++ .set_channels = tg3_set_channels, ++}; ++#endif ++ ++static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ spin_lock_bh(&tp->lock); ++ if (!tp->hw_stats) { ++ spin_unlock_bh(&tp->lock); ++ return &tp->net_stats_prev; ++ } ++ ++ tg3_get_nstats(tp, stats); ++ spin_unlock_bh(&tp->lock); ++ ++ return stats; ++} ++ ++#ifdef GET_NETDEV_OP_EXT ++static const struct net_device_ops_ext tg3_net_device_ops_ext = { ++ .size = sizeof(struct net_device_ops_ext), ++ .ndo_fix_features = tg3_fix_features, ++ .ndo_set_features = tg3_set_features, ++ .ndo_get_stats64 = tg3_get_stats64, ++}; ++#endif ++ ++static void tg3_set_rx_mode(struct net_device *dev) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ if (!netif_running(dev)) ++ return; ++ ++ tg3_full_lock(tp, 0); ++ __tg3_set_rx_mode(dev); ++ tg3_full_unlock(tp); ++} ++ ++static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, ++ int new_mtu) ++{ ++ dev->mtu = new_mtu; ++ ++ if (new_mtu > ETH_DATA_LEN) { ++ if (tg3_flag(tp, 5780_CLASS)) { ++ netdev_update_features(dev); ++ tg3_flag_clear(tp, TSO_CAPABLE); ++#if TG3_TSO_SUPPORT != 0 ++#ifdef BCM_HAS_ETHTOOL_OP_SET_TSO ++ ethtool_op_set_tso(dev, 0); ++#endif ++#endif ++ } else { ++ tg3_flag_set(tp, JUMBO_RING_ENABLE); ++ } ++ } else { ++ if 
(tg3_flag(tp, 5780_CLASS)) { ++ tg3_flag_set(tp, TSO_CAPABLE); ++ netdev_update_features(dev); ++ } ++ tg3_flag_clear(tp, JUMBO_RING_ENABLE); ++ } ++} ++ ++static int tg3_change_mtu(struct net_device *dev, int new_mtu) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ int err; ++ bool reset_phy = false; ++ ++ if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) ++ return -EINVAL; ++ ++ if (!netif_running(dev)) { ++ /* We'll just catch it later when the ++ * device is up'd. ++ */ ++ tg3_set_mtu(dev, tp, new_mtu); ++ return 0; ++ } ++ ++#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 50000) ++ /* There is no need to hold rtnl_lock ++ * when calling change MTU into driver ++ * from VMkernel ESX 5.0 onwards. ++ */ ++ rtnl_lock(); ++#endif ++ ++ tg3_phy_stop(tp); ++ ++ tg3_netif_stop(tp); ++ ++ tg3_set_mtu(dev, tp, new_mtu); ++ ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ tg3_netq_invalidate_state(tp); ++#endif ++ ++ tg3_full_lock(tp, 1); ++ ++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); ++ ++ /* Reset PHY, otherwise the read DMA engine will be in a mode that ++ * breaks all requests to 256 bytes. ++ */ ++ if (tg3_asic_rev(tp) == ASIC_REV_57766) ++ reset_phy = true; ++ ++ err = tg3_restart_hw(tp, reset_phy); ++ ++ if (!err) ++ tg3_netif_start(tp); ++ ++ tg3_full_unlock(tp); ++ ++ if (!err) ++ tg3_phy_start(tp); ++ ++#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 50000) ++ rtnl_unlock(); ++#endif ++ ++ return err; ++} ++ ++#ifdef BCM_HAS_NET_DEVICE_OPS ++static const struct net_device_ops tg3_netdev_ops = { ++ .ndo_open = tg3_open, ++ .ndo_stop = tg3_close, ++ .ndo_start_xmit = tg3_start_xmit, ++#if defined(BCM_HAS_GET_STATS64) ++#if !defined(GET_NETDEV_OP_EXT) ++ .ndo_get_stats64 = tg3_get_stats64, ++#endif ++#else ++ .ndo_get_stats = tg3_get_stats, ++#endif ++ .ndo_validate_addr = eth_validate_addr, ++#ifdef BCM_HAS_SET_MULTICAST_LIST ++ .ndo_set_multicast_list = tg3_set_rx_mode, ++#else ++ .ndo_set_rx_mode = tg3_set_rx_mode, ++#endif ++ .ndo_set_mac_address = tg3_set_mac_addr, ++ .ndo_do_ioctl = tg3_ioctl, ++ .ndo_tx_timeout = tg3_tx_timeout, ++ .ndo_change_mtu = tg3_change_mtu, ++#if defined(BCM_HAS_FIX_FEATURES) && !defined(GET_NETDEV_OP_EXT) ++ .ndo_fix_features = tg3_fix_features, ++ .ndo_set_features = tg3_set_features, ++#endif ++#if defined(BCM_KERNEL_SUPPORTS_8021Q) && !defined(BCM_HAS_NEW_VLAN_INTERFACE) ++ .ndo_vlan_rx_register = tg3_vlan_rx_register, ++#endif ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ .ndo_poll_controller = tg3_poll_controller, ++#endif ++}; ++#endif /* BCM_HAS_NET_DEVICE_OPS */ ++ ++static void __devinit tg3_get_eeprom_size(struct tg3 *tp) ++{ ++ u32 cursize, val, magic; ++ ++ tp->nvram_size = EEPROM_CHIP_SIZE; ++ ++ if (tg3_nvram_read(tp, 0, &magic) != 0) ++ return; ++ ++ if ((magic != TG3_EEPROM_MAGIC) && ++ ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && ++ ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) ++ return; ++ ++ /* ++ * Size the chip by reading offsets at increasing powers of two. ++ * When we encounter our validation signature, we know the addressing ++ * has wrapped around, and thus have our chip size. 
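++ * For example, on a hypothetical 512-byte part the probes at 0x10
++ * through 0x100 return ordinary data, while the read at 0x200 aliases
++ * offset 0 and returns the magic signature, so the loop below exits
++ * with cursize = 0x200 (512 bytes).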
++ */ ++ cursize = 0x10; ++ ++ while (cursize < tp->nvram_size) { ++ if (tg3_nvram_read(tp, cursize, &val) != 0) ++ return; ++ ++ if (val == magic) ++ break; ++ ++ cursize <<= 1; ++ } ++ ++ tp->nvram_size = cursize; ++} ++ ++static void __devinit tg3_get_nvram_size(struct tg3 *tp) ++{ ++ u32 val; ++ ++ if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) ++ return; ++ ++ /* Selfboot format */ ++ if (val != TG3_EEPROM_MAGIC) { ++ tg3_get_eeprom_size(tp); ++ return; ++ } ++ ++ if (tg3_nvram_read(tp, 0xf0, &val) == 0) { ++ if (val != 0) { ++ /* This is confusing. We want to operate on the ++ * 16-bit value at offset 0xf2. The tg3_nvram_read() ++ * call will read from NVRAM and byteswap the data ++ * according to the byteswapping settings for all ++ * other register accesses. This ensures the data we ++ * want will always reside in the lower 16-bits. ++ * However, the data in NVRAM is in LE format, which ++ * means the data from the NVRAM read will always be ++ * opposite the endianness of the CPU. The 16-bit ++ * byteswap then brings the data to CPU endianness. ++ */ ++ tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; ++ return; ++ } ++ } ++ tp->nvram_size = TG3_NVRAM_SIZE_512KB; ++} ++ ++static void __devinit tg3_get_nvram_info(struct tg3 *tp) ++{ ++ u32 nvcfg1; ++ ++ nvcfg1 = tr32(NVRAM_CFG1); ++ if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { ++ tg3_flag_set(tp, FLASH); ++ } else { ++ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; ++ tw32(NVRAM_CFG1, nvcfg1); ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5750 || ++ tg3_flag(tp, 5780_CLASS)) { ++ switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { ++ case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ break; ++ case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; ++ break; ++ case FLASH_VENDOR_ATMEL_EEPROM: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ break; ++ case FLASH_VENDOR_ST: ++ tp->nvram_jedecnum = JEDEC_ST; ++ tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ break; ++ case FLASH_VENDOR_SAIFUN: ++ tp->nvram_jedecnum = JEDEC_SAIFUN; ++ tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; ++ break; ++ case FLASH_VENDOR_SST_SMALL: ++ case FLASH_VENDOR_SST_LARGE: ++ tp->nvram_jedecnum = JEDEC_SST; ++ tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; ++ break; ++ } ++ } else { ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ } ++} ++ ++static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) ++{ ++ switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { ++ case FLASH_5752PAGE_SIZE_256: ++ tp->nvram_pagesize = 256; ++ break; ++ case FLASH_5752PAGE_SIZE_512: ++ tp->nvram_pagesize = 512; ++ break; ++ case FLASH_5752PAGE_SIZE_1K: ++ tp->nvram_pagesize = 1024; ++ break; ++ case FLASH_5752PAGE_SIZE_2K: ++ tp->nvram_pagesize = 2048; ++ break; ++ case FLASH_5752PAGE_SIZE_4K: ++ tp->nvram_pagesize = 4096; ++ break; ++ case FLASH_5752PAGE_SIZE_264: ++ tp->nvram_pagesize = 264; ++ break; ++ case FLASH_5752PAGE_SIZE_528: ++ tp->nvram_pagesize = 528; ++ break; ++ } ++} ++ ++static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) ++{ ++ u32 nvcfg1; ++ ++ nvcfg1 = tr32(NVRAM_CFG1); ++ ++ /* NVRAM protection for TPM */ ++ if (nvcfg1 & (1 << 27)) ++ tg3_flag_set(tp, 
PROTECTED_NVRAM); ++ ++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { ++ case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: ++ case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ break; ++ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, FLASH); ++ break; ++ case FLASH_5752VENDOR_ST_M45PE10: ++ case FLASH_5752VENDOR_ST_M45PE20: ++ case FLASH_5752VENDOR_ST_M45PE40: ++ tp->nvram_jedecnum = JEDEC_ST; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, FLASH); ++ break; ++ } ++ ++ if (tg3_flag(tp, FLASH)) { ++ tg3_nvram_get_pagesize(tp, nvcfg1); ++ } else { ++ /* For eeprom, set pagesize to maximum eeprom size */ ++ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; ++ ++ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; ++ tw32(NVRAM_CFG1, nvcfg1); ++ } ++} ++ ++static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) ++{ ++ u32 nvcfg1, protect = 0; ++ ++ nvcfg1 = tr32(NVRAM_CFG1); ++ ++ /* NVRAM protection for TPM */ ++ if (nvcfg1 & (1 << 27)) { ++ tg3_flag_set(tp, PROTECTED_NVRAM); ++ protect = 1; ++ } ++ ++ nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; ++ switch (nvcfg1) { ++ case FLASH_5755VENDOR_ATMEL_FLASH_1: ++ case FLASH_5755VENDOR_ATMEL_FLASH_2: ++ case FLASH_5755VENDOR_ATMEL_FLASH_3: ++ case FLASH_5755VENDOR_ATMEL_FLASH_5: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, FLASH); ++ tp->nvram_pagesize = 264; ++ if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || ++ nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) ++ tp->nvram_size = (protect ? 0x3e200 : ++ TG3_NVRAM_SIZE_512KB); ++ else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) ++ tp->nvram_size = (protect ? 0x1f200 : ++ TG3_NVRAM_SIZE_256KB); ++ else ++ tp->nvram_size = (protect ? 0x1f200 : ++ TG3_NVRAM_SIZE_128KB); ++ break; ++ case FLASH_5752VENDOR_ST_M45PE10: ++ case FLASH_5752VENDOR_ST_M45PE20: ++ case FLASH_5752VENDOR_ST_M45PE40: ++ tp->nvram_jedecnum = JEDEC_ST; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, FLASH); ++ tp->nvram_pagesize = 256; ++ if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) ++ tp->nvram_size = (protect ? ++ TG3_NVRAM_SIZE_64KB : ++ TG3_NVRAM_SIZE_128KB); ++ else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) ++ tp->nvram_size = (protect ? ++ TG3_NVRAM_SIZE_64KB : ++ TG3_NVRAM_SIZE_256KB); ++ else ++ tp->nvram_size = (protect ? 
++ TG3_NVRAM_SIZE_128KB : ++ TG3_NVRAM_SIZE_512KB); ++ break; ++ } ++} ++ ++static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) ++{ ++ u32 nvcfg1; ++ ++ nvcfg1 = tr32(NVRAM_CFG1); ++ ++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { ++ case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: ++ case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: ++ case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: ++ case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; ++ ++ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; ++ tw32(NVRAM_CFG1, nvcfg1); ++ break; ++ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: ++ case FLASH_5755VENDOR_ATMEL_FLASH_1: ++ case FLASH_5755VENDOR_ATMEL_FLASH_2: ++ case FLASH_5755VENDOR_ATMEL_FLASH_3: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, FLASH); ++ tp->nvram_pagesize = 264; ++ break; ++ case FLASH_5752VENDOR_ST_M45PE10: ++ case FLASH_5752VENDOR_ST_M45PE20: ++ case FLASH_5752VENDOR_ST_M45PE40: ++ tp->nvram_jedecnum = JEDEC_ST; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, FLASH); ++ tp->nvram_pagesize = 256; ++ break; ++ } ++} ++ ++static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) ++{ ++ u32 nvcfg1, protect = 0; ++ ++ nvcfg1 = tr32(NVRAM_CFG1); ++ ++ /* NVRAM protection for TPM */ ++ if (nvcfg1 & (1 << 27)) { ++ tg3_flag_set(tp, PROTECTED_NVRAM); ++ protect = 1; ++ } ++ ++ nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; ++ switch (nvcfg1) { ++ case FLASH_5761VENDOR_ATMEL_ADB021D: ++ case FLASH_5761VENDOR_ATMEL_ADB041D: ++ case FLASH_5761VENDOR_ATMEL_ADB081D: ++ case FLASH_5761VENDOR_ATMEL_ADB161D: ++ case FLASH_5761VENDOR_ATMEL_MDB021D: ++ case FLASH_5761VENDOR_ATMEL_MDB041D: ++ case FLASH_5761VENDOR_ATMEL_MDB081D: ++ case FLASH_5761VENDOR_ATMEL_MDB161D: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, FLASH); ++ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); ++ tp->nvram_pagesize = 256; ++ break; ++ case FLASH_5761VENDOR_ST_A_M45PE20: ++ case FLASH_5761VENDOR_ST_A_M45PE40: ++ case FLASH_5761VENDOR_ST_A_M45PE80: ++ case FLASH_5761VENDOR_ST_A_M45PE16: ++ case FLASH_5761VENDOR_ST_M_M45PE20: ++ case FLASH_5761VENDOR_ST_M_M45PE40: ++ case FLASH_5761VENDOR_ST_M_M45PE80: ++ case FLASH_5761VENDOR_ST_M_M45PE16: ++ tp->nvram_jedecnum = JEDEC_ST; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, FLASH); ++ tp->nvram_pagesize = 256; ++ break; ++ } ++ ++ if (protect) { ++ tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); ++ } else { ++ switch (nvcfg1) { ++ case FLASH_5761VENDOR_ATMEL_ADB161D: ++ case FLASH_5761VENDOR_ATMEL_MDB161D: ++ case FLASH_5761VENDOR_ST_A_M45PE16: ++ case FLASH_5761VENDOR_ST_M_M45PE16: ++ tp->nvram_size = TG3_NVRAM_SIZE_2MB; ++ break; ++ case FLASH_5761VENDOR_ATMEL_ADB081D: ++ case FLASH_5761VENDOR_ATMEL_MDB081D: ++ case FLASH_5761VENDOR_ST_A_M45PE80: ++ case FLASH_5761VENDOR_ST_M_M45PE80: ++ tp->nvram_size = TG3_NVRAM_SIZE_1MB; ++ break; ++ case FLASH_5761VENDOR_ATMEL_ADB041D: ++ case FLASH_5761VENDOR_ATMEL_MDB041D: ++ case FLASH_5761VENDOR_ST_A_M45PE40: ++ case FLASH_5761VENDOR_ST_M_M45PE40: ++ tp->nvram_size = TG3_NVRAM_SIZE_512KB; ++ break; ++ case FLASH_5761VENDOR_ATMEL_ADB021D: ++ case FLASH_5761VENDOR_ATMEL_MDB021D: ++ case FLASH_5761VENDOR_ST_A_M45PE20: ++ case FLASH_5761VENDOR_ST_M_M45PE20: ++ tp->nvram_size = TG3_NVRAM_SIZE_256KB; ++ break; ++ } ++ } ++} ++ ++static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) ++{ ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tg3_flag_set(tp, 
NVRAM_BUFFERED); ++ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; ++} ++ ++static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) ++{ ++ u32 nvcfg1; ++ ++ nvcfg1 = tr32(NVRAM_CFG1); ++ ++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { ++ case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: ++ case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; ++ ++ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; ++ tw32(NVRAM_CFG1, nvcfg1); ++ return; ++ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: ++ case FLASH_57780VENDOR_ATMEL_AT45DB011D: ++ case FLASH_57780VENDOR_ATMEL_AT45DB011B: ++ case FLASH_57780VENDOR_ATMEL_AT45DB021D: ++ case FLASH_57780VENDOR_ATMEL_AT45DB021B: ++ case FLASH_57780VENDOR_ATMEL_AT45DB041D: ++ case FLASH_57780VENDOR_ATMEL_AT45DB041B: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, FLASH); ++ ++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { ++ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: ++ case FLASH_57780VENDOR_ATMEL_AT45DB011D: ++ case FLASH_57780VENDOR_ATMEL_AT45DB011B: ++ tp->nvram_size = TG3_NVRAM_SIZE_128KB; ++ break; ++ case FLASH_57780VENDOR_ATMEL_AT45DB021D: ++ case FLASH_57780VENDOR_ATMEL_AT45DB021B: ++ tp->nvram_size = TG3_NVRAM_SIZE_256KB; ++ break; ++ case FLASH_57780VENDOR_ATMEL_AT45DB041D: ++ case FLASH_57780VENDOR_ATMEL_AT45DB041B: ++ tp->nvram_size = TG3_NVRAM_SIZE_512KB; ++ break; ++ } ++ break; ++ case FLASH_5752VENDOR_ST_M45PE10: ++ case FLASH_5752VENDOR_ST_M45PE20: ++ case FLASH_5752VENDOR_ST_M45PE40: ++ tp->nvram_jedecnum = JEDEC_ST; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, FLASH); ++ ++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { ++ case FLASH_5752VENDOR_ST_M45PE10: ++ tp->nvram_size = TG3_NVRAM_SIZE_128KB; ++ break; ++ case FLASH_5752VENDOR_ST_M45PE20: ++ tp->nvram_size = TG3_NVRAM_SIZE_256KB; ++ break; ++ case FLASH_5752VENDOR_ST_M45PE40: ++ tp->nvram_size = TG3_NVRAM_SIZE_512KB; ++ break; ++ } ++ break; ++ default: ++ tg3_flag_set(tp, NO_NVRAM); ++ return; ++ } ++ ++ tg3_nvram_get_pagesize(tp, nvcfg1); ++ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) ++ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); ++} ++ ++ ++static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) ++{ ++ u32 nvcfg1; ++ ++ nvcfg1 = tr32(NVRAM_CFG1); ++ ++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { ++ case FLASH_5717VENDOR_ATMEL_EEPROM: ++ case FLASH_5717VENDOR_MICRO_EEPROM: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; ++ ++ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; ++ tw32(NVRAM_CFG1, nvcfg1); ++ return; ++ case FLASH_5717VENDOR_ATMEL_MDB011D: ++ case FLASH_5717VENDOR_ATMEL_ADB011B: ++ case FLASH_5717VENDOR_ATMEL_ADB011D: ++ case FLASH_5717VENDOR_ATMEL_MDB021D: ++ case FLASH_5717VENDOR_ATMEL_ADB021B: ++ case FLASH_5717VENDOR_ATMEL_ADB021D: ++ case FLASH_5717VENDOR_ATMEL_45USPT: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, FLASH); ++ ++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { ++ case FLASH_5717VENDOR_ATMEL_MDB021D: ++ /* Detect size with tg3_nvram_get_size() */ ++ break; ++ case FLASH_5717VENDOR_ATMEL_ADB021B: ++ case FLASH_5717VENDOR_ATMEL_ADB021D: ++ tp->nvram_size = TG3_NVRAM_SIZE_256KB; ++ break; ++ default: ++ tp->nvram_size = TG3_NVRAM_SIZE_128KB; ++ break; ++ } ++ break; ++ case FLASH_5717VENDOR_ST_M_M25PE10: ++ case FLASH_5717VENDOR_ST_A_M25PE10: ++ case 
FLASH_5717VENDOR_ST_M_M45PE10: ++ case FLASH_5717VENDOR_ST_A_M45PE10: ++ case FLASH_5717VENDOR_ST_M_M25PE20: ++ case FLASH_5717VENDOR_ST_A_M25PE20: ++ case FLASH_5717VENDOR_ST_M_M45PE20: ++ case FLASH_5717VENDOR_ST_A_M45PE20: ++ case FLASH_5717VENDOR_ST_25USPT: ++ case FLASH_5717VENDOR_ST_45USPT: ++ tp->nvram_jedecnum = JEDEC_ST; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, FLASH); ++ ++ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { ++ case FLASH_5717VENDOR_ST_M_M25PE20: ++ case FLASH_5717VENDOR_ST_M_M45PE20: ++ /* Detect size with tg3_nvram_get_size() */ ++ break; ++ case FLASH_5717VENDOR_ST_A_M25PE20: ++ case FLASH_5717VENDOR_ST_A_M45PE20: ++ tp->nvram_size = TG3_NVRAM_SIZE_256KB; ++ break; ++ default: ++ tp->nvram_size = TG3_NVRAM_SIZE_128KB; ++ break; ++ } ++ break; ++ default: ++ tg3_flag_set(tp, NO_NVRAM); ++ return; ++ } ++ ++ tg3_nvram_get_pagesize(tp, nvcfg1); ++ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) ++ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); ++} ++ ++static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp) ++{ ++ u32 nvcfg1, nvmpinstrp, nv_status; ++ ++ nvcfg1 = tr32(NVRAM_CFG1); ++ nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5762) { ++ if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) { ++ tg3_flag_set(tp, NO_NVRAM); ++ return; ++ } ++ ++ switch (nvmpinstrp) { ++ case FLASH_5762_MX25L_100: ++ case FLASH_5762_MX25L_200: ++ case FLASH_5762_MX25L_400: ++ case FLASH_5762_MX25L_800: ++ case FLASH_5762_MX25L_160_320: ++ tp->nvram_pagesize = 4096; ++ tp->nvram_jedecnum = JEDEC_MACRONIX; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); ++ tg3_flag_set(tp, FLASH); ++ nv_status = tr32(NVRAM_AUTOSENSE_STATUS); ++ tp->nvram_size = ++ (1 << (nv_status >> AUTOSENSE_DEVID & ++ AUTOSENSE_DEVID_MASK) ++ << AUTOSENSE_SIZE_IN_MB); ++ return; ++ ++ case FLASH_5762_EEPROM_HD: ++ nvmpinstrp = FLASH_5720_EEPROM_HD; ++ break; ++ case FLASH_5762_EEPROM_LD: ++ nvmpinstrp = FLASH_5720_EEPROM_LD; ++ break; ++ case FLASH_5720VENDOR_M_ST_M45PE20: ++ /* This pinstrap supports multiple sizes, so force it ++ * to read the actual size from location 0xf0. 
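++ * (With nvram_size left at zero here, tg3_nvram_init() later calls
++ * tg3_get_nvram_size(), which decodes the 16-bit size word at 0xf0.)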
++ */ ++ nvmpinstrp = FLASH_5720VENDOR_ST_45USPT; ++ break; ++ } ++ } ++ ++ switch (nvmpinstrp) { ++ case FLASH_5720_EEPROM_HD: ++ case FLASH_5720_EEPROM_LD: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ ++ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; ++ tw32(NVRAM_CFG1, nvcfg1); ++ if (nvmpinstrp == FLASH_5720_EEPROM_HD) ++ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; ++ else ++ tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; ++ return; ++ case FLASH_5720VENDOR_M_ATMEL_DB011D: ++ case FLASH_5720VENDOR_A_ATMEL_DB011B: ++ case FLASH_5720VENDOR_A_ATMEL_DB011D: ++ case FLASH_5720VENDOR_M_ATMEL_DB021D: ++ case FLASH_5720VENDOR_A_ATMEL_DB021B: ++ case FLASH_5720VENDOR_A_ATMEL_DB021D: ++ case FLASH_5720VENDOR_M_ATMEL_DB041D: ++ case FLASH_5720VENDOR_A_ATMEL_DB041B: ++ case FLASH_5720VENDOR_A_ATMEL_DB041D: ++ case FLASH_5720VENDOR_M_ATMEL_DB081D: ++ case FLASH_5720VENDOR_A_ATMEL_DB081D: ++ case FLASH_5720VENDOR_ATMEL_45USPT: ++ tp->nvram_jedecnum = JEDEC_ATMEL; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, FLASH); ++ ++ switch (nvmpinstrp) { ++ case FLASH_5720VENDOR_M_ATMEL_DB021D: ++ case FLASH_5720VENDOR_A_ATMEL_DB021B: ++ case FLASH_5720VENDOR_A_ATMEL_DB021D: ++ tp->nvram_size = TG3_NVRAM_SIZE_256KB; ++ break; ++ case FLASH_5720VENDOR_M_ATMEL_DB041D: ++ case FLASH_5720VENDOR_A_ATMEL_DB041B: ++ case FLASH_5720VENDOR_A_ATMEL_DB041D: ++ tp->nvram_size = TG3_NVRAM_SIZE_512KB; ++ break; ++ case FLASH_5720VENDOR_M_ATMEL_DB081D: ++ case FLASH_5720VENDOR_A_ATMEL_DB081D: ++ tp->nvram_size = TG3_NVRAM_SIZE_1MB; ++ break; ++ default: ++ if (tg3_asic_rev(tp) != ASIC_REV_5762) ++ tp->nvram_size = TG3_NVRAM_SIZE_128KB; ++ break; ++ } ++ break; ++ case FLASH_5720VENDOR_M_ST_M25PE10: ++ case FLASH_5720VENDOR_M_ST_M45PE10: ++ case FLASH_5720VENDOR_A_ST_M25PE10: ++ case FLASH_5720VENDOR_A_ST_M45PE10: ++ case FLASH_5720VENDOR_M_ST_M25PE20: ++ case FLASH_5720VENDOR_M_ST_M45PE20: ++ case FLASH_5720VENDOR_A_ST_M25PE20: ++ case FLASH_5720VENDOR_A_ST_M45PE20: ++ case FLASH_5720VENDOR_M_ST_M25PE40: ++ case FLASH_5720VENDOR_M_ST_M45PE40: ++ case FLASH_5720VENDOR_A_ST_M25PE40: ++ case FLASH_5720VENDOR_A_ST_M45PE40: ++ case FLASH_5720VENDOR_M_ST_M25PE80: ++ case FLASH_5720VENDOR_M_ST_M45PE80: ++ case FLASH_5720VENDOR_A_ST_M25PE80: ++ case FLASH_5720VENDOR_A_ST_M45PE80: ++ case FLASH_5720VENDOR_ST_25USPT: ++ case FLASH_5720VENDOR_ST_45USPT: ++ tp->nvram_jedecnum = JEDEC_ST; ++ tg3_flag_set(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, FLASH); ++ ++ switch (nvmpinstrp) { ++ case FLASH_5720VENDOR_M_ST_M25PE20: ++ case FLASH_5720VENDOR_M_ST_M45PE20: ++ case FLASH_5720VENDOR_A_ST_M25PE20: ++ case FLASH_5720VENDOR_A_ST_M45PE20: ++ tp->nvram_size = TG3_NVRAM_SIZE_256KB; ++ break; ++ case FLASH_5720VENDOR_M_ST_M25PE40: ++ case FLASH_5720VENDOR_M_ST_M45PE40: ++ case FLASH_5720VENDOR_A_ST_M25PE40: ++ case FLASH_5720VENDOR_A_ST_M45PE40: ++ tp->nvram_size = TG3_NVRAM_SIZE_512KB; ++ break; ++ case FLASH_5720VENDOR_M_ST_M25PE80: ++ case FLASH_5720VENDOR_M_ST_M45PE80: ++ case FLASH_5720VENDOR_A_ST_M25PE80: ++ case FLASH_5720VENDOR_A_ST_M45PE80: ++ tp->nvram_size = TG3_NVRAM_SIZE_1MB; ++ break; ++ default: ++ if (tg3_asic_rev(tp) != ASIC_REV_5762) ++ tp->nvram_size = TG3_NVRAM_SIZE_128KB; ++ break; ++ } ++ break; ++ default: ++ tg3_flag_set(tp, NO_NVRAM); ++ return; ++ } ++ ++ tg3_nvram_get_pagesize(tp, nvcfg1); ++ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) ++ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5762) { ++ u32 val; ++ ++ if (tg3_nvram_read(tp, 0, 
&val)) ++ return; ++ ++ if (val != TG3_EEPROM_MAGIC && ++ (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) ++ tg3_flag_set(tp, NO_NVRAM); ++ } ++} ++ ++/* Chips other than 5700/5701 use the NVRAM for fetching info. */ ++static void __devinit tg3_nvram_init(struct tg3 *tp) ++{ ++ if (tg3_flag(tp, IS_SSB_CORE)) { ++ /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */ ++ tg3_flag_clear(tp, NVRAM); ++ tg3_flag_clear(tp, NVRAM_BUFFERED); ++ tg3_flag_set(tp, NO_NVRAM); ++ return; ++ } ++ ++ tw32_f(GRC_EEPROM_ADDR, ++ (EEPROM_ADDR_FSM_RESET | ++ (EEPROM_DEFAULT_CLOCK_PERIOD << ++ EEPROM_ADDR_CLKPERD_SHIFT))); ++ ++ msleep(1); ++ ++ /* Enable seeprom accesses. */ ++ tw32_f(GRC_LOCAL_CTRL, ++ tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); ++ udelay(100); ++ ++ if (tg3_asic_rev(tp) != ASIC_REV_5700 && ++ tg3_asic_rev(tp) != ASIC_REV_5701) { ++ tg3_flag_set(tp, NVRAM); ++ ++ if (tg3_nvram_lock(tp)) { ++ netdev_warn(tp->dev, ++ "Cannot get nvram lock, %s failed\n", ++ __func__); ++ return; ++ } ++ tg3_enable_nvram_access(tp); ++ ++ tp->nvram_size = 0; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5752) ++ tg3_get_5752_nvram_info(tp); ++ else if (tg3_asic_rev(tp) == ASIC_REV_5755) ++ tg3_get_5755_nvram_info(tp); ++ else if (tg3_asic_rev(tp) == ASIC_REV_5787 || ++ tg3_asic_rev(tp) == ASIC_REV_5784 || ++ tg3_asic_rev(tp) == ASIC_REV_5785) ++ tg3_get_5787_nvram_info(tp); ++ else if (tg3_asic_rev(tp) == ASIC_REV_5761) ++ tg3_get_5761_nvram_info(tp); ++ else if (tg3_asic_rev(tp) == ASIC_REV_5906) ++ tg3_get_5906_nvram_info(tp); ++ else if (tg3_asic_rev(tp) == ASIC_REV_57780 || ++ tg3_flag(tp, 57765_CLASS)) ++ tg3_get_57780_nvram_info(tp); ++ else if (tg3_asic_rev(tp) == ASIC_REV_5717 || ++ tg3_asic_rev(tp) == ASIC_REV_5719) ++ tg3_get_5717_nvram_info(tp); ++ else if (tg3_asic_rev(tp) == ASIC_REV_5720 || ++ tg3_asic_rev(tp) == ASIC_REV_5762) ++ tg3_get_5720_nvram_info(tp); ++ else ++ tg3_get_nvram_info(tp); ++ ++ if (tp->nvram_size == 0) ++ tg3_get_nvram_size(tp); ++ ++ tg3_disable_nvram_access(tp); ++ tg3_nvram_unlock(tp); ++ ++ } else { ++ tg3_flag_clear(tp, NVRAM); ++ tg3_flag_clear(tp, NVRAM_BUFFERED); ++ ++ tg3_get_eeprom_size(tp); ++ } ++} ++ ++struct subsys_tbl_ent { ++ u16 subsys_vendor, subsys_devid; ++ u32 phy_id; ++}; ++ ++static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = { ++ /* Broadcom boards. */ ++ { TG3PCI_SUBVENDOR_ID_BROADCOM, ++ TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, ++ { TG3PCI_SUBVENDOR_ID_BROADCOM, ++ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, ++ { TG3PCI_SUBVENDOR_ID_BROADCOM, ++ TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, ++ { TG3PCI_SUBVENDOR_ID_BROADCOM, ++ TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, ++ { TG3PCI_SUBVENDOR_ID_BROADCOM, ++ TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, ++ { TG3PCI_SUBVENDOR_ID_BROADCOM, ++ TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, ++ { TG3PCI_SUBVENDOR_ID_BROADCOM, ++ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, ++ { TG3PCI_SUBVENDOR_ID_BROADCOM, ++ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, ++ { TG3PCI_SUBVENDOR_ID_BROADCOM, ++ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, ++ { TG3PCI_SUBVENDOR_ID_BROADCOM, ++ TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, ++ { TG3PCI_SUBVENDOR_ID_BROADCOM, ++ TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, ++ ++ /* 3com boards. 
*/ ++ { TG3PCI_SUBVENDOR_ID_3COM, ++ TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, ++ { TG3PCI_SUBVENDOR_ID_3COM, ++ TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, ++ { TG3PCI_SUBVENDOR_ID_3COM, ++ TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, ++ { TG3PCI_SUBVENDOR_ID_3COM, ++ TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, ++ { TG3PCI_SUBVENDOR_ID_3COM, ++ TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, ++ ++ /* DELL boards. */ ++ { TG3PCI_SUBVENDOR_ID_DELL, ++ TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, ++ { TG3PCI_SUBVENDOR_ID_DELL, ++ TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, ++ { TG3PCI_SUBVENDOR_ID_DELL, ++ TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, ++ { TG3PCI_SUBVENDOR_ID_DELL, ++ TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, ++ ++ /* Compaq boards. */ ++ { TG3PCI_SUBVENDOR_ID_COMPAQ, ++ TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, ++ { TG3PCI_SUBVENDOR_ID_COMPAQ, ++ TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, ++ { TG3PCI_SUBVENDOR_ID_COMPAQ, ++ TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, ++ { TG3PCI_SUBVENDOR_ID_COMPAQ, ++ TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, ++ { TG3PCI_SUBVENDOR_ID_COMPAQ, ++ TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, ++ ++ /* IBM boards. */ ++ { TG3PCI_SUBVENDOR_ID_IBM, ++ TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } ++}; ++ ++static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { ++ if ((subsys_id_to_phy_id[i].subsys_vendor == ++ tp->pdev->subsystem_vendor) && ++ (subsys_id_to_phy_id[i].subsys_devid == ++ tp->pdev->subsystem_device)) ++ return &subsys_id_to_phy_id[i]; ++ } ++ return NULL; ++} ++ ++static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) ++{ ++ u32 val; ++ ++ tp->phy_id = TG3_PHY_ID_INVALID; ++ tp->led_ctrl = LED_CTRL_MODE_PHY_1; ++ ++ /* Assume an onboard device and WOL capable by default. 
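++ * Both assumptions are revisited below once the NVRAM signature and
++ * the nic_cfg bits have been read.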
*/ ++ tg3_flag_set(tp, EEPROM_WRITE_PROT); ++ tg3_flag_set(tp, WOL_CAP); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5906) { ++ if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { ++ tg3_flag_clear(tp, EEPROM_WRITE_PROT); ++ tg3_flag_set(tp, IS_NIC); ++ } ++ val = tr32(VCPU_CFGSHDW); ++ if (val & VCPU_CFGSHDW_ASPM_DBNC) ++ tg3_flag_set(tp, ASPM_WORKAROUND); ++ if ((val & VCPU_CFGSHDW_WOL_ENABLE) && ++ (val & VCPU_CFGSHDW_WOL_MAGPKT)) ++ tg3_flag_set(tp, WOL_ENABLE); ++ goto done; ++ } ++ ++ tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); ++ if (val == NIC_SRAM_DATA_SIG_MAGIC) { ++ u32 nic_cfg, led_cfg; ++ u32 cfg2 = 0, cfg4 = 0, cfg5 = 0; ++ u32 nic_phy_id, ver, eeprom_phy_id; ++ int eeprom_phy_serdes = 0; ++ ++ tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); ++ tp->nic_sram_data_cfg = nic_cfg; ++ ++ tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); ++ ver >>= NIC_SRAM_DATA_VER_SHIFT; ++ if (tg3_asic_rev(tp) != ASIC_REV_5700 && ++ tg3_asic_rev(tp) != ASIC_REV_5701 && ++ tg3_asic_rev(tp) != ASIC_REV_5703 && ++ (ver > 0) && (ver < 0x100)) ++ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5785) ++ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 || ++ tg3_asic_rev(tp) == ASIC_REV_5719 || ++ tg3_asic_rev(tp) == ASIC_REV_5720) ++ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5); ++ ++ if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == ++ NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) ++ eeprom_phy_serdes = 1; ++ ++ tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); ++ if (nic_phy_id != 0) { ++ u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; ++ u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; ++ ++ eeprom_phy_id = (id1 >> 16) << 10; ++ eeprom_phy_id |= (id2 & 0xfc00) << 16; ++ eeprom_phy_id |= (id2 & 0x03ff) << 0; ++ } else ++ eeprom_phy_id = 0; ++ ++ tp->phy_id = eeprom_phy_id; ++ if (eeprom_phy_serdes) { ++ if (!tg3_flag(tp, 5705_PLUS)) ++ tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; ++ else ++ tp->phy_flags |= TG3_PHYFLG_MII_SERDES; ++ } ++ ++ if (tg3_flag(tp, 5750_PLUS)) ++ led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | ++ SHASTA_EXT_LED_MODE_MASK); ++ else ++ led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; ++ ++ switch (led_cfg) { ++ default: ++ case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: ++ tp->led_ctrl = LED_CTRL_MODE_PHY_1; ++ break; ++ ++ case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: ++ tp->led_ctrl = LED_CTRL_MODE_PHY_2; ++ break; ++ ++ case NIC_SRAM_DATA_CFG_LED_MODE_MAC: ++ tp->led_ctrl = LED_CTRL_MODE_MAC; ++ ++ /* Default to PHY_1_MODE if 0 (MAC_MODE) is ++ * read on some older 5700/5701 bootcode. 
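++ * The check below therefore overrides MAC mode with PHY_1 mode on
++ * those two chips.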
++ */ ++ if (tg3_asic_rev(tp) == ASIC_REV_5700 || ++ tg3_asic_rev(tp) == ASIC_REV_5701) ++ tp->led_ctrl = LED_CTRL_MODE_PHY_1; ++ ++ break; ++ ++ case SHASTA_EXT_LED_SHARED: ++ tp->led_ctrl = LED_CTRL_MODE_SHARED; ++ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && ++ tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1) ++ tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | ++ LED_CTRL_MODE_PHY_2); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 || ++ tg3_asic_rev(tp) == ASIC_REV_5719 || ++ tg3_asic_rev(tp) == ASIC_REV_5720 || ++ tg3_asic_rev(tp) == ASIC_REV_5762) ++ tp->led_ctrl |= 0xfff80000; ++ ++ break; ++ ++ case SHASTA_EXT_LED_MAC: ++ tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; ++ break; ++ ++ case SHASTA_EXT_LED_COMBO: ++ tp->led_ctrl = LED_CTRL_MODE_COMBO; ++ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) ++ tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | ++ LED_CTRL_MODE_PHY_2); ++ break; ++ ++ } ++ ++ if ((tg3_asic_rev(tp) == ASIC_REV_5700 || ++ tg3_asic_rev(tp) == ASIC_REV_5701) && ++ tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) ++ tp->led_ctrl = LED_CTRL_MODE_PHY_2; ++ ++ if (tg3_chip_rev(tp) == CHIPREV_5784_AX) ++ tp->led_ctrl = LED_CTRL_MODE_PHY_1; ++ ++ if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { ++ tg3_flag_set(tp, EEPROM_WRITE_PROT); ++ if ((tp->pdev->subsystem_vendor == ++ PCI_VENDOR_ID_ARIMA) && ++ (tp->pdev->subsystem_device == 0x205a || ++ tp->pdev->subsystem_device == 0x2063)) ++ tg3_flag_clear(tp, EEPROM_WRITE_PROT); ++ } else { ++ tg3_flag_clear(tp, EEPROM_WRITE_PROT); ++ tg3_flag_set(tp, IS_NIC); ++ } ++ ++ if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { ++ tg3_flag_set(tp, ENABLE_ASF); ++ if (tg3_flag(tp, 5750_PLUS)) ++ tg3_flag_set(tp, ASF_NEW_HANDSHAKE); ++ } ++ ++ if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && ++ tg3_flag(tp, 5750_PLUS)) ++ tg3_flag_set(tp, ENABLE_APE); ++ ++ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && ++ !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) ++ tg3_flag_clear(tp, WOL_CAP); ++ ++ if (tg3_flag(tp, WOL_CAP) && ++ (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) ++ tg3_flag_set(tp, WOL_ENABLE); ++ ++ if (cfg2 & (1 << 17)) ++ tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; ++ ++ /* serdes signal pre-emphasis in register 0x590 set by */ ++ /* bootcode if bit 18 is set */ ++ if (cfg2 & (1 << 18)) ++ tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; ++ ++ if ((tg3_flag(tp, 57765_PLUS) || ++ tg3_asic_rev(tp) == ASIC_REV_5785 || ++ (tg3_asic_rev(tp) == ASIC_REV_5784 && ++ tg3_chip_rev(tp) != CHIPREV_5784_AX)) && ++ (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) ++ tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; ++ ++ if (tg3_flag(tp, PCI_EXPRESS)) { ++ u32 cfg3; ++ ++ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); ++ if (tg3_asic_rev(tp) != ASIC_REV_5785 && ++ !tg3_flag(tp, 57765_PLUS) && ++ (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)) ++ tg3_flag_set(tp, ASPM_WORKAROUND); ++ if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID) ++ tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; ++ if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK) ++ tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; ++ } ++ ++ if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) ++ tg3_flag_set(tp, RGMII_INBAND_DISABLE); ++ if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) ++ tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); ++ if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) ++ tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); ++ ++ if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV) ++ tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV; ++ } ++done: ++ ++#ifndef BCM_HAS_DEVICE_SET_WAKEUP_CAPABLE ++ device_init_wakeup(&tp->pdev->dev, tg3_flag(tp, WOL_CAP)); ++#endif ++ ++ if (tg3_flag(tp, WOL_CAP)) ++ device_set_wakeup_enable(&tp->pdev->dev, ++ 
tg3_flag(tp, WOL_ENABLE)); ++ else ++ device_set_wakeup_capable(&tp->pdev->dev, false); ++} ++ ++static int __devinit tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val) ++{ ++ int i, err; ++ u32 val2, off = offset * 8; ++ ++ err = tg3_nvram_lock(tp); ++ if (err) ++ return err; ++ ++ tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE); ++ tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN | ++ APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START); ++ tg3_ape_read32(tp, TG3_APE_OTP_CTRL); ++ udelay(10); ++ ++ for (i = 0; i < 100; i++) { ++ val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS); ++ if (val2 & APE_OTP_STATUS_CMD_DONE) { ++ *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA); ++ break; ++ } ++ udelay(10); ++ } ++ ++ tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0); ++ ++ tg3_nvram_unlock(tp); ++ if (val2 & APE_OTP_STATUS_CMD_DONE) ++ return 0; ++ ++ return -EBUSY; ++} ++ ++static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) ++{ ++ int i; ++ u32 val; ++ ++ tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); ++ tw32(OTP_CTRL, cmd); ++ ++ /* Wait for up to 1 ms for command to execute. */ ++ for (i = 0; i < 100; i++) { ++ val = tr32(OTP_STATUS); ++ if (val & OTP_STATUS_CMD_DONE) ++ break; ++ udelay(10); ++ } ++ ++ return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY; ++} ++ ++/* Read the gphy configuration from the OTP region of the chip. The gphy ++ * configuration is a 32-bit value that straddles the alignment boundary. ++ * We do two 32-bit reads and then shift and merge the results. ++ */ ++static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp) ++{ ++ u32 bhalf_otp, thalf_otp; ++ ++ tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC); ++ ++ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT)) ++ return 0; ++ ++ tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1); ++ ++ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) ++ return 0; ++ ++ thalf_otp = tr32(OTP_READ_DATA); ++ ++ tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2); ++ ++ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) ++ return 0; ++ ++ bhalf_otp = tr32(OTP_READ_DATA); ++ ++ return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); ++} ++ ++static void __devinit tg3_phy_init_link_config(struct tg3 *tp) ++{ ++ u32 adv = ADVERTISED_Autoneg; ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { ++ if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV)) ++ adv |= ADVERTISED_1000baseT_Half; ++ adv |= ADVERTISED_1000baseT_Full; ++ } ++ ++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) ++ adv |= ADVERTISED_100baseT_Half | ++ ADVERTISED_100baseT_Full | ++ ADVERTISED_10baseT_Half | ++ ADVERTISED_10baseT_Full | ++ ADVERTISED_TP; ++ else ++ adv |= ADVERTISED_FIBRE; ++ ++ tp->link_config.advertising = adv; ++ tp->link_config.speed = SPEED_UNKNOWN; ++ tp->link_config.duplex = DUPLEX_UNKNOWN; ++ tp->link_config.autoneg = AUTONEG_ENABLE; ++ tp->link_config.active_speed = SPEED_UNKNOWN; ++ tp->link_config.active_duplex = DUPLEX_UNKNOWN; ++ ++ tp->old_link = -1; ++} ++ ++static int __devinit tg3_phy_probe(struct tg3 *tp) ++{ ++ u32 hw_phy_id_1, hw_phy_id_2; ++ u32 hw_phy_id, hw_phy_id_masked; ++ int err; ++ ++ /* flow control autonegotiation is default behavior */ ++ tg3_flag_set(tp, PAUSE_AUTONEG); ++ tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; ++ ++ if (tg3_flag(tp, ENABLE_APE)) { ++ switch (tp->pci_fn) { ++ case 0: ++ tp->phy_ape_lock = TG3_APE_LOCK_PHY0; ++ break; ++ case 1: ++ tp->phy_ape_lock = TG3_APE_LOCK_PHY1; ++ break; ++ case 2: ++ tp->phy_ape_lock = TG3_APE_LOCK_PHY2; ++ break; ++ case 3: ++ tp->phy_ape_lock = TG3_APE_LOCK_PHY3; ++ 
break;
++ }
++ }
++
++ if (!tg3_flag(tp, ENABLE_ASF) &&
++ !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
++ !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
++ tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
++ TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
++
++ if (tg3_flag(tp, USE_PHYLIB))
++ return tg3_phy_init(tp);
++
++ /* Reading the PHY ID register can conflict with ASF
++ * firmware access to the PHY hardware.
++ */
++ err = 0;
++ if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
++ hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
++ } else {
++ /* Now read the physical PHY_ID from the chip and verify
++ * that it is sane. If it doesn't look good, we fall back
++ * to the PHY ID found in the EEPROM area and, failing
++ * that, the hard-coded subsystem-ID table.
++ */
++ err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
++ err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
++
++ hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
++ hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
++ hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
++
++ hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
++ }
++
++ if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
++ tp->phy_id = hw_phy_id;
++ if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
++ tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
++ else
++ tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
++ } else {
++ if (tp->phy_id != TG3_PHY_ID_INVALID) {
++ /* Do nothing, phy ID already set up in
++ * tg3_get_eeprom_hw_cfg().
++ */
++ } else {
++ struct subsys_tbl_ent *p;
++
++ /* No eeprom signature? Try the hardcoded
++ * subsys device table.
++ */
++ p = tg3_lookup_by_subsys(tp);
++ if (p) {
++ tp->phy_id = p->phy_id;
++ } else if (!tg3_flag(tp, IS_SSB_CORE)) {
++ /* For now we saw the IDs 0xbc050cd0,
++ * 0xbc050f80 and 0xbc050c30 on devices
++ * connected to a BCM4785, and there are
++ * probably more. Just assume that the PHY is
++ * supported when it is connected to an SSB
++ * core for now.
++ */
++ return -ENODEV;
++ }
++
++ if (!tp->phy_id ||
++ tp->phy_id == TG3_PHY_ID_BCM8002)
++ tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
++ }
++ }
++
++ /* A0 */
++ if (tg3_asic_rev(tp) == ASIC_REV_5785 &&
++ tp->phy_id == TG3_PHY_ID_BCM50612E) {
++ tp->phy_flags &= ~TG3_PHYFLG_ENABLE_APD;
++ tg3_flag_clear(tp, RGMII_INBAND_DISABLE);
++ tg3_flag_clear(tp, RGMII_EXT_IBND_RX_EN);
++ tg3_flag_clear(tp, RGMII_EXT_IBND_TX_EN);
++ }
++
++#ifndef TG3_DISABLE_EEE_SUPPORT
++ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
++ (tg3_asic_rev(tp) == ASIC_REV_5719 ||
++ tg3_asic_rev(tp) == ASIC_REV_5720 ||
++ tg3_asic_rev(tp) == ASIC_REV_57766 ||
++ tg3_asic_rev(tp) == ASIC_REV_5762 ||
++ (tg3_asic_rev(tp) == ASIC_REV_5717 &&
++ tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
++ (tg3_asic_rev(tp) == ASIC_REV_57765 &&
++ tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
++ tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
++
++ tp->eee.supported = SUPPORTED_100baseT_Full |
++ SUPPORTED_1000baseT_Full;
++ tp->eee.advertised = ADVERTISED_100baseT_Full |
++ ADVERTISED_1000baseT_Full;
++ tp->eee.eee_enabled = !tg3_disable_eee;
++ tp->eee.tx_lpi_enabled = 1;
++ tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
++ }
++#endif /* TG3_DISABLE_EEE_SUPPORT */
++
++ tg3_phy_init_link_config(tp);
++
++ /* Bring the phy out of its low-power state.
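++ * Skip the reset when management firmware (ASF/APE) owns the PHY,
++ * on serdes devices, or when the link must stay up across a
++ * power-down; those cases are checked below.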
*/ ++ if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && ++ !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && ++ !tg3_flag(tp, ENABLE_APE) && !tg3_flag(tp, ENABLE_ASF)) ++ err = tg3_phy_reset(tp); ++ ++ return err; ++} ++ ++static void __devinit tg3_read_vpd(struct tg3 *tp) ++{ ++ u8 *vpd_data; ++ unsigned int block_end, rosize, len; ++ u32 vpdlen; ++ int j, i = 0; ++ ++ vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); ++ if (!vpd_data) ++ goto out_no_vpd; ++ ++ i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA); ++ if (i < 0) ++ goto out_not_found; ++ ++ rosize = pci_vpd_lrdt_size(&vpd_data[i]); ++ block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize; ++ i += PCI_VPD_LRDT_TAG_SIZE; ++ ++ if (block_end > vpdlen) ++ goto out_not_found; ++ ++ j = pci_vpd_find_info_keyword(vpd_data, i, rosize, ++ PCI_VPD_RO_KEYWORD_MFR_ID); ++ if (j > 0) { ++ len = pci_vpd_info_field_size(&vpd_data[j]); ++ ++ j += PCI_VPD_INFO_FLD_HDR_SIZE; ++ if (j + len > block_end || len != 4 || ++ memcmp(&vpd_data[j], "1028", 4)) ++ goto partno; ++ ++ j = pci_vpd_find_info_keyword(vpd_data, i, rosize, ++ PCI_VPD_RO_KEYWORD_VENDOR0); ++ if (j < 0) ++ goto partno; ++ ++ len = pci_vpd_info_field_size(&vpd_data[j]); ++ ++ j += PCI_VPD_INFO_FLD_HDR_SIZE; ++ if (j + len > block_end) ++ goto partno; ++ ++ if (len >= sizeof(tp->fw_ver)) ++ len = sizeof(tp->fw_ver) - 1; ++ memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); ++ snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, ++ &vpd_data[j]); ++ } ++ ++partno: ++ i = pci_vpd_find_info_keyword(vpd_data, i, rosize, ++ PCI_VPD_RO_KEYWORD_PARTNO); ++ if (i < 0) ++ goto out_not_found; ++ ++ len = pci_vpd_info_field_size(&vpd_data[i]); ++ ++ i += PCI_VPD_INFO_FLD_HDR_SIZE; ++ if (len > TG3_BPN_SIZE || ++ (len + i) > vpdlen) ++ goto out_not_found; ++ ++ memcpy(tp->board_part_number, &vpd_data[i], len); ++ ++out_not_found: ++ kfree(vpd_data); ++ if (tp->board_part_number[0]) ++ return; ++ ++out_no_vpd: ++ if (tg3_asic_rev(tp) == ASIC_REV_5717) { ++ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C) ++ strcpy(tp->board_part_number, "BCM5717"); ++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) ++ strcpy(tp->board_part_number, "BCM5718"); ++ else ++ goto nomatch; ++ } else if (tg3_asic_rev(tp) == ASIC_REV_57780) { ++ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) ++ strcpy(tp->board_part_number, "BCM57780"); ++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) ++ strcpy(tp->board_part_number, "BCM57760"); ++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) ++ strcpy(tp->board_part_number, "BCM57790"); ++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) ++ strcpy(tp->board_part_number, "BCM57788"); ++ else ++ goto nomatch; ++ } else if (tg3_asic_rev(tp) == ASIC_REV_57765) { ++ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) ++ strcpy(tp->board_part_number, "BCM57761"); ++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) ++ strcpy(tp->board_part_number, "BCM57765"); ++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) ++ strcpy(tp->board_part_number, "BCM57781"); ++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) ++ strcpy(tp->board_part_number, "BCM57785"); ++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) ++ strcpy(tp->board_part_number, "BCM57791"); ++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) ++ strcpy(tp->board_part_number, "BCM57795"); ++ else ++ goto nomatch; ++ } else if (tg3_asic_rev(tp) == ASIC_REV_57766) { ++ if (tp->pdev->device == 
TG3PCI_DEVICE_TIGON3_57762) ++ strcpy(tp->board_part_number, "BCM57762"); ++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) ++ strcpy(tp->board_part_number, "BCM57766"); ++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782) ++ strcpy(tp->board_part_number, "BCM57782"); ++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) ++ strcpy(tp->board_part_number, "BCM57786"); ++ else ++ goto nomatch; ++ } else if (tg3_asic_rev(tp) == ASIC_REV_5906) { ++ strcpy(tp->board_part_number, "BCM95906"); ++ } else { ++nomatch: ++ strcpy(tp->board_part_number, "none"); ++ } ++} ++ ++static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) ++{ ++ u32 val; ++ ++ if (tg3_nvram_read(tp, offset, &val) || ++ (val & 0xfc000000) != 0x0c000000 || ++ tg3_nvram_read(tp, offset + 4, &val) || ++ val != 0) ++ return 0; ++ ++ return 1; ++} ++ ++static void __devinit tg3_read_bc_ver(struct tg3 *tp) ++{ ++ u32 val, offset, start, ver_offset; ++ int i, dst_off; ++ bool newver = false; ++ ++ if (tg3_nvram_read(tp, 0xc, &offset) || ++ tg3_nvram_read(tp, 0x4, &start)) ++ return; ++ ++ offset = tg3_nvram_logical_addr(tp, offset); ++ ++ if (tg3_nvram_read(tp, offset, &val)) ++ return; ++ ++ if ((val & 0xfc000000) == 0x0c000000) { ++ if (tg3_nvram_read(tp, offset + 4, &val)) ++ return; ++ ++ if (val == 0) ++ newver = true; ++ } ++ ++ dst_off = strlen(tp->fw_ver); ++ ++ if (newver) { ++ if (TG3_VER_SIZE - dst_off < 16 || ++ tg3_nvram_read(tp, offset + 8, &ver_offset)) ++ return; ++ ++ offset = offset + ver_offset - start; ++ for (i = 0; i < 16; i += 4) { ++ __be32 v; ++ if (tg3_nvram_read_be32(tp, offset + i, &v)) ++ return; ++ ++ memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); ++ } ++ } else { ++ u32 major, minor; ++ ++ if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) ++ return; ++ ++ major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> ++ TG3_NVM_BCVER_MAJSFT; ++ minor = ver_offset & TG3_NVM_BCVER_MINMSK; ++ snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, ++ "v%d.%02d", major, minor); ++ } ++} ++ ++static void __devinit tg3_read_hwsb_ver(struct tg3 *tp) ++{ ++ u32 val, major, minor; ++ ++ /* Use native endian representation */ ++ if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) ++ return; ++ ++ major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> ++ TG3_NVM_HWSB_CFG1_MAJSFT; ++ minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> ++ TG3_NVM_HWSB_CFG1_MINSFT; ++ ++ snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); ++} ++ ++static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) ++{ ++ u32 offset, major, minor, build; ++ ++ strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); ++ ++ if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) ++ return; ++ ++ switch (val & TG3_EEPROM_SB_REVISION_MASK) { ++ case TG3_EEPROM_SB_REVISION_0: ++ offset = TG3_EEPROM_SB_F1R0_EDH_OFF; ++ break; ++ case TG3_EEPROM_SB_REVISION_2: ++ offset = TG3_EEPROM_SB_F1R2_EDH_OFF; ++ break; ++ case TG3_EEPROM_SB_REVISION_3: ++ offset = TG3_EEPROM_SB_F1R3_EDH_OFF; ++ break; ++ case TG3_EEPROM_SB_REVISION_4: ++ offset = TG3_EEPROM_SB_F1R4_EDH_OFF; ++ break; ++ case TG3_EEPROM_SB_REVISION_5: ++ offset = TG3_EEPROM_SB_F1R5_EDH_OFF; ++ break; ++ case TG3_EEPROM_SB_REVISION_6: ++ offset = TG3_EEPROM_SB_F1R6_EDH_OFF; ++ break; ++ default: ++ return; ++ } ++ ++ if (tg3_nvram_read(tp, offset, &val)) ++ return; ++ ++ build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> ++ TG3_EEPROM_SB_EDH_BLD_SHFT; ++ major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> ++ TG3_EEPROM_SB_EDH_MAJ_SHFT; ++ minor = val & 
TG3_EEPROM_SB_EDH_MIN_MASK; ++ ++ if (minor > 99 || build > 26) ++ return; ++ ++ offset = strlen(tp->fw_ver); ++ snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, ++ " v%d.%02d", major, minor); ++ ++ if (build > 0) { ++ offset = strlen(tp->fw_ver); ++ if (offset < TG3_VER_SIZE - 1) ++ tp->fw_ver[offset] = 'a' + build - 1; ++ } ++} ++ ++static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp) ++{ ++ u32 val, offset, start; ++ int i, vlen; ++ ++ for (offset = TG3_NVM_DIR_START; ++ offset < TG3_NVM_DIR_END; ++ offset += TG3_NVM_DIRENT_SIZE) { ++ if (tg3_nvram_read(tp, offset, &val)) ++ return; ++ ++ if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) ++ break; ++ } ++ ++ if (offset == TG3_NVM_DIR_END) ++ return; ++ ++ if (!tg3_flag(tp, 5705_PLUS)) ++ start = 0x08000000; ++ else if (tg3_nvram_read(tp, offset - 4, &start)) ++ return; ++ ++ if (tg3_nvram_read(tp, offset + 4, &offset) || ++ !tg3_fw_img_is_valid(tp, offset) || ++ tg3_nvram_read(tp, offset + 8, &val)) ++ return; ++ ++ offset += val - start; ++ ++ vlen = strlen(tp->fw_ver); ++ ++ tp->fw_ver[vlen++] = ','; ++ tp->fw_ver[vlen++] = ' '; ++ ++ for (i = 0; i < 4; i++) { ++ __be32 v; ++ if (tg3_nvram_read_be32(tp, offset, &v)) ++ return; ++ ++ offset += sizeof(v); ++ ++ if (vlen > TG3_VER_SIZE - sizeof(v)) { ++ memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); ++ break; ++ } ++ ++ memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); ++ vlen += sizeof(v); ++ } ++} ++ ++static void __devinit tg3_probe_ncsi(struct tg3 *tp) ++{ ++ u32 apedata; ++ ++ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); ++ if (apedata != APE_SEG_SIG_MAGIC) ++ return; ++ ++ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); ++ if (!(apedata & APE_FW_STATUS_READY)) ++ return; ++ ++ if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) ++ tg3_flag_set(tp, APE_HAS_NCSI); ++} ++ ++static void __devinit tg3_read_dash_ver(struct tg3 *tp) ++{ ++ int vlen; ++ u32 apedata; ++ char *fwtype; ++ ++ apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); ++ ++ if (tg3_flag(tp, APE_HAS_NCSI)) ++ fwtype = "NCSI"; ++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725) ++ fwtype = "SMASH"; ++ else ++ fwtype = "DASH"; ++ ++ vlen = strlen(tp->fw_ver); ++ ++ snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", ++ fwtype, ++ (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, ++ (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, ++ (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, ++ (apedata & APE_FW_VERSION_BLDMSK)); ++} ++ ++static void __devinit tg3_read_otp_ver(struct tg3 *tp) ++{ ++ u32 val, val2; ++ ++ if (tg3_asic_rev(tp) != ASIC_REV_5762) ++ return; ++ ++ if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) && ++ !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) && ++ TG3_OTP_MAGIC0_VALID(val)) { ++ u64 val64 = (u64) val << 32 | val2; ++ u32 ver = 0; ++ int i, vlen; ++ ++ for (i = 0; i < 7; i++) { ++ if ((val64 & 0xff) == 0) ++ break; ++ ver = val64 & 0xff; ++ val64 >>= 8; ++ } ++ vlen = strlen(tp->fw_ver); ++ snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver); ++ } ++} ++ ++static void __devinit tg3_read_fw_ver(struct tg3 *tp) ++{ ++ u32 val; ++ bool vpd_vers = false; ++ ++ if (tp->fw_ver[0] != 0) ++ vpd_vers = true; ++ ++ if (tg3_flag(tp, NO_NVRAM)) { ++ strcat(tp->fw_ver, "sb"); ++ tg3_read_otp_ver(tp); ++ return; ++ } ++ ++ if (tg3_nvram_read(tp, 0, &val)) ++ return; ++ ++ if (val == TG3_EEPROM_MAGIC) ++ tg3_read_bc_ver(tp); ++ else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) ++ 
tg3_read_sb_ver(tp, val); ++ else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) ++ tg3_read_hwsb_ver(tp); ++ ++ if (tg3_flag(tp, ENABLE_ASF)) { ++ if (tg3_flag(tp, ENABLE_APE)) { ++ tg3_probe_ncsi(tp); ++ if (!vpd_vers) ++ tg3_read_dash_ver(tp); ++ } else if (!vpd_vers) { ++ tg3_read_mgmtfw_ver(tp); ++ } ++ } ++ ++ tp->fw_ver[TG3_VER_SIZE - 1] = 0; ++} ++ ++static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) ++{ ++ if (tg3_flag(tp, LRG_PROD_RING_CAP)) ++ return TG3_RX_RET_MAX_SIZE_5717; ++ else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) ++ return TG3_RX_RET_MAX_SIZE_5700; ++ else ++ return TG3_RX_RET_MAX_SIZE_5705; ++} ++ ++#if (LINUX_VERSION_CODE >= 0x2060a) ++static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = { ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, ++ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, ++ { }, ++}; ++#endif ++ ++static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp) ++{ ++ struct pci_dev *peer; ++ unsigned int func, devnr = tp->pdev->devfn & ~7; ++ ++ for (func = 0; func < 8; func++) { ++ peer = pci_get_slot(tp->pdev->bus, devnr | func); ++ if (peer && peer != tp->pdev) ++ break; ++ pci_dev_put(peer); ++ } ++ /* 5704 can be configured in single-port mode, set peer to ++ * tp->pdev in that case. ++ */ ++ if (!peer) { ++ peer = tp->pdev; ++ return peer; ++ } ++ ++ /* ++ * We don't need to keep the refcount elevated; there's no way ++ * to remove one half of this device without removing the other ++ */ ++ pci_dev_put(peer); ++ ++ return peer; ++} ++ ++static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) ++{ ++ tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT; ++ if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) { ++ u32 reg; ++ ++ /* All devices that use the alternate ++ * ASIC REV location have a CPMU. ++ */ ++ tg3_flag_set(tp, CPMU_PRESENT); ++ ++ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) ++ reg = TG3PCI_GEN2_PRODID_ASICREV; ++ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) ++ reg = TG3PCI_GEN15_PRODID_ASICREV; ++ else ++ reg = TG3PCI_PRODID_ASICREV; ++ ++ pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id); ++ } ++ ++ /* Wrong chip ID in 5752 A0. This code can be removed later ++ * as A0 is not in production. 
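++ * The A0 hardware reports CHIPREV_ID_5752_A0_HW; remap it to the
++ * canonical CHIPREV_ID_5752_A0 so that later revision checks match.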
++ */ ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW) ++ tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; ++ ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0) ++ tp->pci_chip_rev_id = CHIPREV_ID_5720_A0; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 || ++ tg3_asic_rev(tp) == ASIC_REV_5719 || ++ tg3_asic_rev(tp) == ASIC_REV_5720) ++ tg3_flag_set(tp, 5717_PLUS); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_57765 || ++ tg3_asic_rev(tp) == ASIC_REV_57766) ++ tg3_flag_set(tp, 57765_CLASS); ++ ++ if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) || ++ tg3_asic_rev(tp) == ASIC_REV_5762) ++ tg3_flag_set(tp, 57765_PLUS); ++ ++ /* Intentionally exclude ASIC_REV_5906 */ ++ if (tg3_asic_rev(tp) == ASIC_REV_5755 || ++ tg3_asic_rev(tp) == ASIC_REV_5787 || ++ tg3_asic_rev(tp) == ASIC_REV_5784 || ++ tg3_asic_rev(tp) == ASIC_REV_5761 || ++ tg3_asic_rev(tp) == ASIC_REV_5785 || ++ tg3_asic_rev(tp) == ASIC_REV_57780 || ++ tg3_flag(tp, 57765_PLUS)) ++ tg3_flag_set(tp, 5755_PLUS); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5780 || ++ tg3_asic_rev(tp) == ASIC_REV_5714) ++ tg3_flag_set(tp, 5780_CLASS); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5750 || ++ tg3_asic_rev(tp) == ASIC_REV_5752 || ++ tg3_asic_rev(tp) == ASIC_REV_5906 || ++ tg3_flag(tp, 5755_PLUS) || ++ tg3_flag(tp, 5780_CLASS)) ++ tg3_flag_set(tp, 5750_PLUS); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5705 || ++ tg3_flag(tp, 5750_PLUS)) ++ tg3_flag_set(tp, 5705_PLUS); ++} ++ ++static bool tg3_10_100_only_device(struct tg3 *tp, ++ const struct pci_device_id *ent) ++{ ++ u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK; ++ ++ if ((tg3_asic_rev(tp) == ASIC_REV_5703 && ++ (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || ++ (tp->phy_flags & TG3_PHYFLG_IS_FET)) ++ return true; ++ ++ if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) { ++ if (tg3_asic_rev(tp) == ASIC_REV_5705) { ++ if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100) ++ return true; ++ } else ++ return true; ++ } ++ ++ return false; ++} ++ ++static int __devinit tg3_get_invariants(struct tg3 *tp, ++ const struct pci_device_id *ent) ++{ ++ u32 misc_ctrl_reg; ++ u32 pci_state_reg, grc_misc_cfg; ++ u32 val; ++ u16 pci_cmd; ++ int err; ++ ++ /* Force memory write invalidate off. If we leave it on, ++ * then on 5700_BX chips we have to enable a workaround. ++ * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary ++ * to match the cacheline size. The Broadcom driver has this ++ * workaround but turns MWI off all the time and so never uses ++ * it. This seems to suggest that the workaround is insufficient. ++ */ ++ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); ++ pci_cmd &= ~PCI_COMMAND_INVALIDATE; ++ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); ++ ++ /* Important! -- Make sure register accesses are byteswapped ++ * correctly. Also, for those chips that require it, make ++ * sure that indirect register accesses are enabled before ++ * the first operation. ++ */ ++ pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, ++ &misc_ctrl_reg); ++ tp->misc_host_ctrl |= (misc_ctrl_reg & ++ MISC_HOST_CTRL_CHIPREV); ++ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, ++ tp->misc_host_ctrl); ++ ++ tg3_detect_asic_rev(tp, misc_ctrl_reg); ++ ++ /* Fix for CTRL-20413(Huawei)/19887 in KVM PCI Pass-thru mode ++ * Qemu is dropping the PCI config space writes to ++ * 0x68, but it is 'not dropping' the BAR space access, ++ * since the BAR registers are set up properly in the KVM environment. 
++ * This redundant write fixes the issue for the KVM hypervisor ++ * in SUSE 11.3, as reported by Huawei. ++ */ ++ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) ++ tg3_write32(tp, TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); ++ /* Fix for CTRL-20413 ends */ ++ ++ /* If we have 5702/03 A1 or A2 on certain ICH chipsets, ++ * we need to disable memory and use config. cycles ++ * only to access all registers. The 5702/03 chips ++ * can mistakenly decode the special cycles from the ++ * ICH chipsets as memory write cycles, causing corruption ++ * of register and memory space. Only certain ICH bridges ++ * will drive special cycles with non-zero data during the ++ * address phase which can fall within the 5703's address ++ * range. This is not an ICH bug as the PCI spec allows ++ * non-zero address during special cycles. However, only ++ * these ICH bridges are known to drive non-zero addresses ++ * during special cycles. ++ * ++ * Since special cycles do not cross PCI bridges, we only ++ * enable this workaround if the 5703 is on the secondary ++ * bus of these ICH bridges. ++ */ ++ if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) || ++ (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) { ++ static struct tg3_dev_id { ++ u32 vendor; ++ u32 device; ++ u32 rev; ++ } ich_chipsets[] = { ++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, ++ PCI_ANY_ID }, ++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, ++ PCI_ANY_ID }, ++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, ++ 0xa }, ++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, ++ PCI_ANY_ID }, ++ { }, ++ }; ++ struct tg3_dev_id *pci_id = &ich_chipsets[0]; ++ struct pci_dev *bridge = NULL; ++ ++ while (pci_id->vendor != 0) { ++ bridge = pci_get_device(pci_id->vendor, pci_id->device, ++ bridge); ++ if (!bridge) { ++ pci_id++; ++ continue; ++ } ++ if (pci_id->rev != PCI_ANY_ID) { ++ u8 rev; ++ ++ pci_read_config_byte(bridge, PCI_REVISION_ID, ++ &rev); ++ if (rev > pci_id->rev) ++ continue; ++ } ++ if (bridge->subordinate && ++ (bridge->subordinate->number == ++ tp->pdev->bus->number)) { ++ tg3_flag_set(tp, ICH_WORKAROUND); ++ pci_dev_put(bridge); ++ break; ++ } ++ } ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5701) { ++ static struct tg3_dev_id { ++ u32 vendor; ++ u32 device; ++ } bridge_chipsets[] = { ++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, ++ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, ++ { }, ++ }; ++ struct tg3_dev_id *pci_id = &bridge_chipsets[0]; ++ struct pci_dev *bridge = NULL; ++ ++ while (pci_id->vendor != 0) { ++ bridge = pci_get_device(pci_id->vendor, ++ pci_id->device, ++ bridge); ++ if (!bridge) { ++ pci_id++; ++ continue; ++ } ++ if (bridge->subordinate && ++ (bridge->subordinate->number <= ++ tp->pdev->bus->number) && ++ (bridge->subordinate->busn_res_end >= ++ tp->pdev->bus->number)) { ++ tg3_flag_set(tp, 5701_DMA_BUG); ++ pci_dev_put(bridge); ++ break; ++ } ++ } ++ } ++ ++ /* The EPB bridge inside 5714, 5715, and 5780 cannot support ++ * DMA addresses > 40-bit. This bridge may have additional ++ * 57xx devices behind it in some 4-port NIC designs, for example. ++ * Any tg3 device found behind the bridge will also need the 40-bit ++ * DMA workaround. 
++ */ ++ if (tg3_flag(tp, 5780_CLASS)) { ++ tg3_flag_set(tp, 40BIT_DMA_BUG); ++ tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); ++ } else { ++ struct pci_dev *bridge = NULL; ++ ++ do { ++ bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, ++ PCI_DEVICE_ID_SERVERWORKS_EPB, ++ bridge); ++ if (bridge && bridge->subordinate && ++ (bridge->subordinate->number <= ++ tp->pdev->bus->number) && ++ (bridge->subordinate->busn_res_end >= ++ tp->pdev->bus->number)) { ++ tg3_flag_set(tp, 40BIT_DMA_BUG); ++ pci_dev_put(bridge); ++ break; ++ } ++ } while (bridge); ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5704 || ++ tg3_asic_rev(tp) == ASIC_REV_5714) ++ tp->pdev_peer = tg3_find_peer(tp); ++ ++ /* Determine TSO capabilities */ ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0) ++ ; /* Do nothing. HW bug. */ ++ else if (tg3_flag(tp, 57765_PLUS)) ++ tg3_flag_set(tp, HW_TSO_3); ++ else if (tg3_flag(tp, 5755_PLUS) || ++ tg3_asic_rev(tp) == ASIC_REV_5906) ++ tg3_flag_set(tp, HW_TSO_2); ++ else if (tg3_flag(tp, 5750_PLUS)) { ++ tg3_flag_set(tp, HW_TSO_1); ++ tg3_flag_set(tp, TSO_BUG); ++ if (tg3_asic_rev(tp) == ASIC_REV_5750 && ++ tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2) ++ tg3_flag_clear(tp, TSO_BUG); ++ } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && ++ tg3_asic_rev(tp) != ASIC_REV_5701 && ++ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { ++ tg3_flag_set(tp, FW_TSO); ++ tg3_flag_set(tp, TSO_BUG); ++ if (tg3_asic_rev(tp) == ASIC_REV_5705) ++ tp->fw_needed = FIRMWARE_TG3TSO5; ++ else ++ tp->fw_needed = FIRMWARE_TG3TSO; ++ } ++ ++ /* Selectively allow TSO based on operating conditions */ ++ if (tg3_flag(tp, HW_TSO_1) || ++ tg3_flag(tp, HW_TSO_2) || ++ tg3_flag(tp, HW_TSO_3) || ++ tg3_flag(tp, FW_TSO)) { ++ /* For firmware TSO, assume ASF is disabled. ++ * We'll disable TSO later if we discover ASF ++ * is enabled in tg3_get_eeprom_hw_cfg(). 
++ */ ++ tg3_flag_set(tp, TSO_CAPABLE); ++ } else { ++ tg3_flag_clear(tp, TSO_CAPABLE); ++ tg3_flag_clear(tp, TSO_BUG); ++ tp->fw_needed = NULL; ++ } ++ ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) ++ tp->fw_needed = FIRMWARE_TG3; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_57766) ++ tp->fw_needed = FIRMWARE_TG357766; ++ ++ tp->irq_max = 1; ++ ++ if (tg3_flag(tp, 5750_PLUS)) { ++ tg3_flag_set(tp, SUPPORT_MSI); ++ if (tg3_chip_rev(tp) == CHIPREV_5750_AX || ++ tg3_chip_rev(tp) == CHIPREV_5750_BX || ++ (tg3_asic_rev(tp) == ASIC_REV_5714 && ++ tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 && ++ tp->pdev_peer == tp->pdev)) ++ tg3_flag_clear(tp, SUPPORT_MSI); ++ ++ if (tg3_flag(tp, 5755_PLUS) || ++ tg3_asic_rev(tp) == ASIC_REV_5906) { ++ tg3_flag_set(tp, 1SHOT_MSI); ++ } ++ ++ if (tg3_flag(tp, 57765_PLUS)) { ++ tg3_flag_set(tp, SUPPORT_MSIX); ++#ifdef TG3_NAPI ++ tp->irq_max = TG3_IRQ_MAX_VECS_RSS; ++#endif ++ } ++#if defined(__VMKLNX__) ++ tp->irq_max = 1; ++#if defined(TG3_VMWARE_NETQ_ENABLE) && !defined(TG3_INBOX) ++ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || ++ tg3_asic_rev(tp) == ASIC_REV_5719 || ++ (tg3_asic_rev(tp) == ASIC_REV_5720 && ++ tp->pdev->device != TG3PCI_DEVICE_TIGON3_5717_C)) { ++ tp->vmware.netq.index = tg3_netq_index++; ++ tg3_flag_set(tp, IOV_CAPABLE); ++ tg3_flag_clear(tp, 1SHOT_MSI); ++ tp->irq_max = min(TG3_IRQ_MAX_VECS, TG3_IRQ_MAX_VECS_IOV); ++ } ++#endif /* TG3_VMWARE_NETQ_ENABLE && !TG3_INBOX */ ++#endif /* __VMKLNX__ */ ++ } ++ ++ tp->txq_max = 1; ++ tp->rxq_max = 1; ++ if (tp->irq_max > 1) { ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ if (tg3_flag(tp, IOV_CAPABLE)) ++ tp->rxq_max = tp->irq_max; ++ else ++ tp->rxq_max = 1; ++#else ++ tp->rxq_max = TG3_RSS_MAX_NUM_QS; ++ tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS); ++#endif ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5719 || ++ tg3_asic_rev(tp) == ASIC_REV_5720) ++ tp->txq_max = tp->irq_max - 1; ++ } ++ ++ if (tg3_flag(tp, 5755_PLUS) || ++ tg3_asic_rev(tp) == ASIC_REV_5906) ++ tg3_flag_set(tp, SHORT_DMA_BUG); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5719 || ++ tg3_asic_rev(tp) == ASIC_REV_5762) ++ tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; ++#if defined(__VMKLNX__) ++ else if (tg3_flag(tp, TSO_CAPABLE)) ++ tp->dma_limit = TG3_TX_BD_DMA_MAX_32K; ++#endif ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 || ++ tg3_asic_rev(tp) == ASIC_REV_5719 || ++ tg3_asic_rev(tp) == ASIC_REV_5720 || ++ tg3_asic_rev(tp) == ASIC_REV_5762) ++ tg3_flag_set(tp, LRG_PROD_RING_CAP); ++ ++ if (tg3_flag(tp, 57765_PLUS) && ++ tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0) ++ tg3_flag_set(tp, USE_JUMBO_BDFLAG); ++ ++ if (!tg3_flag(tp, 5705_PLUS) || ++ tg3_flag(tp, 5780_CLASS) || ++ tg3_flag(tp, USE_JUMBO_BDFLAG)) ++ tg3_flag_set(tp, JUMBO_CAPABLE); ++ ++ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, ++ &pci_state_reg); ++ ++#ifndef BCM_HAS_PCI_PCIE_CAP ++ tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP); ++#endif ++ ++ if (pci_is_pcie(tp->pdev)) { ++ u16 lnkctl; ++ ++ tg3_flag_set(tp, PCI_EXPRESS); ++ ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0) { ++ int readrq = pcie_get_readrq(tp->pdev); ++ if (readrq > 2048) ++ pcie_set_readrq(tp->pdev, 2048); ++ } ++ ++ pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl); ++ if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { ++ if (tg3_asic_rev(tp) == ASIC_REV_5906) { ++ tg3_flag_clear(tp, HW_TSO_2); ++ tg3_flag_clear(tp, TSO_CAPABLE); ++ } ++ if (tg3_asic_rev(tp) == ASIC_REV_5784 || ++ tg3_asic_rev(tp) == ASIC_REV_5761 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 || ++ tg3_chip_rev_id(tp) == 
CHIPREV_ID_57780_A1) ++ tg3_flag_set(tp, CLKREQ_BUG); ++ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) { ++ tg3_flag_set(tp, L1PLLPD_EN); ++ } ++ } else if (tg3_asic_rev(tp) == ASIC_REV_5785) { ++ /* BCM5785 devices are effectively PCIe devices, and should ++ * follow PCIe codepaths, but do not have a PCIe capabilities ++ * section. ++ */ ++ tg3_flag_set(tp, PCI_EXPRESS); ++ } else if (!tg3_flag(tp, 5705_PLUS) || ++ tg3_flag(tp, 5780_CLASS)) { ++ tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); ++ if (!tp->pcix_cap) { ++ dev_err(&tp->pdev->dev, ++ "Cannot find PCI-X capability, aborting\n"); ++ return -EIO; ++ } ++ ++ if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) ++ tg3_flag_set(tp, PCIX_MODE); ++ } ++ ++ /* If we have an AMD 762 or VIA K8T800 chipset, write ++ * reordering to the mailbox registers done by the host ++ * controller can cause major troubles. We read back from ++ * every mailbox register write to force the writes to be ++ * posted to the chip in order. ++ */ ++#if (LINUX_VERSION_CODE < 0x2060a) ++ if ((pci_find_device(PCI_VENDOR_ID_AMD, ++ PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL) || ++ pci_find_device(PCI_VENDOR_ID_AMD, ++ PCI_DEVICE_ID_AMD_8131_BRIDGE, NULL) || ++ pci_find_device(PCI_VENDOR_ID_VIA, ++ PCI_DEVICE_ID_VIA_8385_0, NULL)) && ++#else ++ if (pci_dev_present(tg3_write_reorder_chipsets) && ++#endif ++ !tg3_flag(tp, PCI_EXPRESS)) ++ tg3_flag_set(tp, MBOX_WRITE_REORDER); ++ ++ pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, ++ &tp->pci_cacheline_sz); ++ pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, ++ &tp->pci_lat_timer); ++ if (tg3_asic_rev(tp) == ASIC_REV_5703 && ++ tp->pci_lat_timer < 64) { ++ tp->pci_lat_timer = 64; ++ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, ++ tp->pci_lat_timer); ++ } ++ ++ /* Important! -- It is critical that the PCI-X hw workaround ++ * situation is decided before the first MMIO register access. ++ */ ++ if (tg3_chip_rev(tp) == CHIPREV_5700_BX) { ++ /* 5700 BX chips need to have their TX producer index ++ * mailboxes written twice to work around a bug. ++ */ ++ tg3_flag_set(tp, TXD_MBOX_HWBUG); ++ ++ /* If we are in PCI-X mode, enable register write workaround. ++ * ++ * The workaround is to use indirect register accesses ++ * for all chip writes not to mailbox registers. ++ */ ++ if (tg3_flag(tp, PCIX_MODE)) { ++ u32 pm_reg; ++ ++ tg3_flag_set(tp, PCIX_TARGET_HWBUG); ++ ++ /* The chip can have its power management PCI config ++ * space registers clobbered due to this bug. ++ * So explicitly force the chip into D0 here. ++ */ ++ pci_read_config_dword(tp->pdev, ++ tp->pm_cap + PCI_PM_CTRL, ++ &pm_reg); ++ pm_reg &= ~PCI_PM_CTRL_STATE_MASK; ++ pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; ++ pci_write_config_dword(tp->pdev, ++ tp->pm_cap + PCI_PM_CTRL, ++ pm_reg); ++ ++ /* Also, force SERR#/PERR# in PCI command. 
*/ ++ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); ++ pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; ++ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); ++ } ++ } ++ ++ if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) ++ tg3_flag_set(tp, PCI_HIGH_SPEED); ++ if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) ++ tg3_flag_set(tp, PCI_32BIT); ++ ++ /* Chip-specific fixup from Broadcom driver */ ++ if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) && ++ (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { ++ pci_state_reg |= PCISTATE_RETRY_SAME_DMA; ++ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); ++ } ++ ++ /* Default fast path register access methods */ ++ tp->read32 = tg3_read32; ++ tp->write32 = tg3_write32; ++ tp->read32_mbox = tg3_read32; ++ tp->write32_mbox = tg3_write32; ++ tp->write32_tx_mbox = tg3_write32; ++ tp->write32_rx_mbox = tg3_write32; ++ ++ /* Various workaround register access methods */ ++ if (tg3_flag(tp, PCIX_TARGET_HWBUG)) ++ tp->write32 = tg3_write_indirect_reg32; ++ else if (tg3_asic_rev(tp) == ASIC_REV_5701 || ++ (tg3_flag(tp, PCI_EXPRESS) && ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) { ++ /* ++ * Back to back register writes can cause problems on these ++ * chips, the workaround is to read back all reg writes ++ * except those to mailbox regs. ++ * ++ * See tg3_write_indirect_reg32(). ++ */ ++ tp->write32 = tg3_write_flush_reg32; ++ } ++ ++ if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { ++ tp->write32_tx_mbox = tg3_write32_tx_mbox; ++ if (tg3_flag(tp, MBOX_WRITE_REORDER)) ++ tp->write32_rx_mbox = tg3_write_flush_reg32; ++ } ++ ++ if (tg3_flag(tp, ICH_WORKAROUND)) { ++ tp->read32 = tg3_read_indirect_reg32; ++ tp->write32 = tg3_write_indirect_reg32; ++ tp->read32_mbox = tg3_read_indirect_mbox; ++ tp->write32_mbox = tg3_write_indirect_mbox; ++ tp->write32_tx_mbox = tg3_write_indirect_mbox; ++ tp->write32_rx_mbox = tg3_write_indirect_mbox; ++ ++ iounmap(tp->regs); ++ tp->regs = NULL; ++ ++ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); ++ pci_cmd &= ~PCI_COMMAND_MEMORY; ++ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); ++ } ++ if (tg3_asic_rev(tp) == ASIC_REV_5906) { ++ tp->read32_mbox = tg3_read32_mbox_5906; ++ tp->write32_mbox = tg3_write32_mbox_5906; ++ tp->write32_tx_mbox = tg3_write32_mbox_5906; ++ tp->write32_rx_mbox = tg3_write32_mbox_5906; ++ } ++ ++ if (tp->write32 == tg3_write_indirect_reg32 || ++ (tg3_flag(tp, PCIX_MODE) && ++ (tg3_asic_rev(tp) == ASIC_REV_5700 || ++ tg3_asic_rev(tp) == ASIC_REV_5701))) ++ tg3_flag_set(tp, SRAM_USE_CONFIG); ++ ++ /* The memory arbiter has to be enabled in order for SRAM accesses ++ * to succeed. Normally on powerup the tg3 chip firmware will make ++ * sure it is enabled, but other entities such as system netboot ++ * code might disable it. ++ */ ++ val = tr32(MEMARB_MODE); ++ tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); ++ ++ tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; ++ if (tg3_asic_rev(tp) == ASIC_REV_5704 || ++ tg3_flag(tp, 5780_CLASS)) { ++ if (tg3_flag(tp, PCIX_MODE)) { ++ pci_read_config_dword(tp->pdev, ++ tp->pcix_cap + PCI_X_STATUS, ++ &val); ++ tp->pci_fn = val & 0x7; ++ } ++ } else if (tg3_asic_rev(tp) == ASIC_REV_5717 || ++ tg3_asic_rev(tp) == ASIC_REV_5719 || ++ tg3_asic_rev(tp) == ASIC_REV_5720) { ++ tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); ++ if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG) ++ val = tr32(TG3_CPMU_STATUS); ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5717) ++ tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 
1 : 0; ++ else ++ tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> ++ TG3_CPMU_STATUS_FSHFT_5719; ++ } ++ ++ if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { ++ tp->write32_tx_mbox = tg3_write_flush_reg32; ++ tp->write32_rx_mbox = tg3_write_flush_reg32; ++ } ++ ++ /* Get eeprom hw config before calling tg3_set_power_state(). ++ * In particular, the TG3_FLAG_IS_NIC flag must be ++ * determined before calling tg3_set_power_state() so that ++ * we know whether or not to switch out of Vaux power. ++ * When the flag is set, it means that GPIO1 is used for eeprom ++ * write protect and also implies that it is a LOM where GPIOs ++ * are not used to switch power. ++ */ ++ tg3_get_eeprom_hw_cfg(tp); ++ ++ if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) { ++ tg3_flag_clear(tp, TSO_CAPABLE); ++ tg3_flag_clear(tp, TSO_BUG); ++ tp->fw_needed = NULL; ++ } ++ ++ if (tg3_flag(tp, ENABLE_APE)) { ++ /* Allow reads and writes to the ++ * APE register and memory space. ++ */ ++ pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | ++ PCISTATE_ALLOW_APE_SHMEM_WR | ++ PCISTATE_ALLOW_APE_PSPACE_WR; ++ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, ++ pci_state_reg); ++ ++ tg3_ape_lock_init(tp); ++ tp->ape_hb_interval = ++ msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC); ++ } ++ ++#if !defined(__VMKLNX__) ++ tp->recoverable_err_interval = msecs_to_jiffies(RECOVERABLE_ERR_10SEC); ++#endif ++ ++ /* Set up tp->grc_local_ctrl before calling ++ * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high ++ * will bring 5700's external PHY out of reset. ++ * It is also used as eeprom write protect on LOMs. ++ */ ++ tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; ++ if (tg3_asic_rev(tp) == ASIC_REV_5700 || ++ tg3_flag(tp, EEPROM_WRITE_PROT)) ++ tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | ++ GRC_LCLCTRL_GPIO_OUTPUT1); ++ /* Unused GPIO3 must be driven as output on 5752 because there ++ * are no pull-up resistors on unused GPIO pins. ++ */ ++ else if (tg3_asic_rev(tp) == ASIC_REV_5752) ++ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5755 || ++ tg3_asic_rev(tp) == ASIC_REV_57780 || ++ tg3_flag(tp, 57765_CLASS)) ++ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; ++ ++ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { ++ /* Turn off the debug UART. */ ++ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; ++ if (tg3_flag(tp, IS_NIC)) ++ /* Keep VMain power. */ ++ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | ++ GRC_LCLCTRL_GPIO_OUTPUT0; ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5762) ++ tp->grc_local_ctrl |= ++ tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL; ++ ++ /* Switch out of Vaux if it is a NIC */ ++ tg3_pwrsrc_switch_to_vmain(tp); ++ ++ /* Derive initial jumbo mode from MTU assigned in ++ * ether_setup() via the alloc_etherdev() call ++ */ ++ if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) ++ tg3_flag_set(tp, JUMBO_RING_ENABLE); ++ ++ /* Determine WakeOnLan speed to use. 
*/ ++ if (tg3_asic_rev(tp) == ASIC_REV_5700 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) { ++ tg3_flag_clear(tp, WOL_SPEED_100MB); ++ } else { ++ tg3_flag_set(tp, WOL_SPEED_100MB); ++ } ++ ++#ifndef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_asic_rev(tp) == ASIC_REV_5906 || ++ (tg3_asic_rev(tp) == ASIC_REV_5785 && ++ (tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCMAC131)) ++ tp->phy_flags |= TG3_PHYFLG_IS_FET; ++#else ++ if (tg3_asic_rev(tp) == ASIC_REV_5906) ++ tp->phy_flags |= TG3_PHYFLG_IS_FET; ++#endif ++ ++ /* A few boards don't want Ethernet@WireSpeed phy feature */ ++ if (tg3_asic_rev(tp) == ASIC_REV_5700 || ++ (tg3_asic_rev(tp) == ASIC_REV_5705 && ++ (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) && ++ (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) || ++ (tp->phy_flags & TG3_PHYFLG_IS_FET) || ++ (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) ++ tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; ++ ++ if (tg3_chip_rev(tp) == CHIPREV_5703_AX || ++ tg3_chip_rev(tp) == CHIPREV_5704_AX) ++ tp->phy_flags |= TG3_PHYFLG_ADC_BUG; ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) ++ tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; ++ ++ if (tg3_flag(tp, 5705_PLUS) && ++ !(tp->phy_flags & TG3_PHYFLG_IS_FET) && ++ tg3_asic_rev(tp) != ASIC_REV_5785 && ++ tg3_asic_rev(tp) != ASIC_REV_57780 && ++ !tg3_flag(tp, 57765_PLUS)) { ++ if (tg3_asic_rev(tp) == ASIC_REV_5755 || ++ tg3_asic_rev(tp) == ASIC_REV_5787 || ++ tg3_asic_rev(tp) == ASIC_REV_5784 || ++ tg3_asic_rev(tp) == ASIC_REV_5761) { ++ if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && ++ tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) ++ tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; ++ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) ++ tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; ++ } else ++ tp->phy_flags |= TG3_PHYFLG_BER_BUG; ++ } ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5784 && ++ tg3_chip_rev(tp) != CHIPREV_5784_AX) { ++ tp->phy_otp = tg3_read_otp_phycfg(tp); ++ if (tp->phy_otp == 0) ++ tp->phy_otp = TG3_OTP_DEFAULT; ++ } ++ ++ if (tg3_flag(tp, CPMU_PRESENT)) ++ tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; ++ else ++ tp->mi_mode = MAC_MI_MODE_BASE; ++ ++ tp->coalesce_mode = 0; ++ if (tg3_chip_rev(tp) != CHIPREV_5700_AX && ++ tg3_chip_rev(tp) != CHIPREV_5700_BX) ++ tp->coalesce_mode |= HOSTCC_MODE_32BYTE; ++ ++ /* Set these bits to enable statistics workaround. */ ++ if (tg3_asic_rev(tp) == ASIC_REV_5717 || ++ tg3_asic_rev(tp) == ASIC_REV_5762 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) { ++ tp->coalesce_mode |= HOSTCC_MODE_ATTN; ++ tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; ++ } ++ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ if (tg3_asic_rev(tp) == ASIC_REV_5785 || ++ tg3_asic_rev(tp) == ASIC_REV_57780) ++ tg3_flag_set(tp, USE_PHYLIB); ++#endif ++ ++ err = tg3_mdio_init(tp); ++ if (err) ++ return err; ++ ++ /* Initialize data/descriptor byte/word swapping. */ ++ val = tr32(GRC_MODE); ++ if (tg3_asic_rev(tp) == ASIC_REV_5720 || ++ tg3_asic_rev(tp) == ASIC_REV_5762) ++ val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | ++ GRC_MODE_WORD_SWAP_B2HRX_DATA | ++ GRC_MODE_B2HRX_ENABLE | ++ GRC_MODE_HTX2B_ENABLE | ++ GRC_MODE_HOST_STACKUP); ++ else ++ val &= GRC_MODE_HOST_STACKUP; ++ ++ tw32(GRC_MODE, val | tp->grc_mode); ++ ++ tg3_switch_clocks(tp); ++ ++ /* Clear this out for sanity. */ ++ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); ++ ++ /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. 
*/ ++ tw32(TG3PCI_REG_BASE_ADDR, 0); ++ ++ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, ++ &pci_state_reg); ++ if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && ++ !tg3_flag(tp, PCIX_TARGET_HWBUG)) { ++ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 || ++ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) { ++ void __iomem *sram_base; ++ ++ /* Write some dummy words into the SRAM status block ++ * area, see if it reads back correctly. If the return ++ * value is bad, force enable the PCIX workaround. ++ */ ++ sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; ++ ++ writel(0x00000000, sram_base); ++ writel(0x00000000, sram_base + 4); ++ writel(0xffffffff, sram_base + 4); ++ if (readl(sram_base) != 0x00000000) ++ tg3_flag_set(tp, PCIX_TARGET_HWBUG); ++ } ++ } ++ ++ udelay(50); ++ tg3_nvram_init(tp); ++ ++ /* If the device has an NVRAM, no need to load patch firmware */ ++ if (tg3_asic_rev(tp) == ASIC_REV_57766 && ++ !tg3_flag(tp, NO_NVRAM)) ++ tp->fw_needed = NULL; ++ ++ grc_misc_cfg = tr32(GRC_MISC_CFG); ++ grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5705 && ++ (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || ++ grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) ++ tg3_flag_set(tp, IS_5788); ++ ++ if (!tg3_flag(tp, IS_5788) && ++ tg3_asic_rev(tp) != ASIC_REV_5700) ++ tg3_flag_set(tp, TAGGED_STATUS); ++ if (tg3_flag(tp, TAGGED_STATUS)) { ++ tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | ++ HOSTCC_MODE_CLRTICK_TXBD); ++ ++ tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; ++ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, ++ tp->misc_host_ctrl); ++ } ++ ++ /* Preserve the APE MAC_MODE bits */ ++ if (tg3_flag(tp, ENABLE_APE)) ++ tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; ++ else ++ tp->mac_mode = 0; ++ ++ if (tg3_10_100_only_device(tp, ent)) ++ tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; ++ ++ err = tg3_phy_probe(tp); ++ if (err) { ++ dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); ++ /* ... but do not return immediately ... */ ++ tg3_mdio_fini(tp); ++ } ++ ++ tg3_read_vpd(tp); ++ tg3_read_fw_ver(tp); ++ ++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { ++ tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; ++ } else { ++ if (tg3_asic_rev(tp) == ASIC_REV_5700) ++ tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; ++ else ++ tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; ++ } ++ ++ /* 5700 {AX,BX} chips have a broken status block link ++ * change bit implementation, so we must use the ++ * status register in those cases. ++ */ ++ if (tg3_asic_rev(tp) == ASIC_REV_5700) ++ tg3_flag_set(tp, USE_LINKCHG_REG); ++ else ++ tg3_flag_clear(tp, USE_LINKCHG_REG); ++ ++ /* The led_ctrl is set during tg3_phy_probe, here we might ++ * have to force the link status polling mechanism based ++ * upon subsystem IDs. ++ */ ++ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && ++ tg3_asic_rev(tp) == ASIC_REV_5701 && ++ !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { ++ tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; ++ tg3_flag_set(tp, USE_LINKCHG_REG); ++ } ++ ++ /* For all SERDES we poll the MAC status register. 
*/ ++ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ++ tg3_flag_set(tp, POLL_SERDES); ++ else ++ tg3_flag_clear(tp, POLL_SERDES); ++ ++ if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF)) ++ tg3_flag_set(tp, POLL_CPMU_LINK); ++ ++ tp->rx_offset = NET_IP_ALIGN; ++ tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; ++ if (tg3_asic_rev(tp) == ASIC_REV_5701 && ++ tg3_flag(tp, PCIX_MODE)) { ++ tp->rx_offset = 0; ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++ tp->rx_copy_thresh = ~(u16)0; ++#endif ++ } ++ ++ tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; ++ tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; ++ tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; ++ ++ tp->rx_std_max_post = tp->rx_std_ring_mask + 1; ++ ++ /* Increment the rx prod index on the rx std ring by at most ++ * 8 for these chips to workaround hw errata. ++ */ ++ if (tg3_asic_rev(tp) == ASIC_REV_5750 || ++ tg3_asic_rev(tp) == ASIC_REV_5752 || ++ tg3_asic_rev(tp) == ASIC_REV_5755) ++ tp->rx_std_max_post = 8; ++ ++ if (tg3_flag(tp, ASPM_WORKAROUND)) ++ tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & ++ PCIE_PWR_MGMT_L1_THRESH_MSK; ++ ++ return err; ++} ++ ++#ifdef CONFIG_SPARC ++static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) ++{ ++ struct net_device *dev = tp->dev; ++ struct pci_dev *pdev = tp->pdev; ++ struct device_node *dp = pci_device_to_OF_node(pdev); ++ const unsigned char *addr; ++ int len; ++ ++ addr = of_get_property(dp, "local-mac-address", &len); ++ if (addr && len == ETH_ALEN) { ++ memcpy(dev->dev_addr, addr, 6); ++ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); ++ return 0; ++ } ++ return -ENODEV; ++} ++ ++static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp) ++{ ++ struct net_device *dev = tp->dev; ++ ++ memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); ++ memcpy(dev->perm_addr, idprom->id_ethaddr, ETH_ALEN); ++ return 0; ++} ++#endif ++ ++static int __devinit tg3_get_device_address(struct tg3 *tp) ++{ ++ struct net_device *dev = tp->dev; ++ u32 hi, lo, mac_offset; ++ int addr_ok = 0; ++ int err; ++ ++#ifdef CONFIG_SPARC ++ if (!tg3_get_macaddr_sparc(tp)) ++ return 0; ++#endif ++ ++ if (tg3_flag(tp, IS_SSB_CORE)) { ++ err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]); ++ if (!err && is_valid_ether_addr(&dev->dev_addr[0])) ++ return 0; ++ } ++ ++ mac_offset = 0x7c; ++ if (tg3_asic_rev(tp) == ASIC_REV_5704 || ++ tg3_flag(tp, 5780_CLASS)) { ++ if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) ++ mac_offset = 0xcc; ++ if (tg3_nvram_lock(tp)) ++ tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); ++ else ++ tg3_nvram_unlock(tp); ++ } else if (tg3_flag(tp, 5717_PLUS)) { ++ if (tp->pci_fn & 1) ++ mac_offset = 0xcc; ++ if (tp->pci_fn > 1) ++ mac_offset += 0x18c; ++ } else if (tg3_asic_rev(tp) == ASIC_REV_5906) ++ mac_offset = 0x10; ++ ++ /* First try to get it from MAC address mailbox. */ ++ tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); ++ if ((hi >> 16) == 0x484b) { ++ dev->dev_addr[0] = (hi >> 8) & 0xff; ++ dev->dev_addr[1] = (hi >> 0) & 0xff; ++ ++ tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); ++ dev->dev_addr[2] = (lo >> 24) & 0xff; ++ dev->dev_addr[3] = (lo >> 16) & 0xff; ++ dev->dev_addr[4] = (lo >> 8) & 0xff; ++ dev->dev_addr[5] = (lo >> 0) & 0xff; ++ ++ /* Some old bootcode may report a 0 MAC address in SRAM */ ++ addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); ++ } ++ if (!addr_ok) { ++ /* Next, try NVRAM. 
*/ ++ if (!tg3_flag(tp, NO_NVRAM) && ++ !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && ++ !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { ++ memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); ++ memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); ++ } ++ /* Finally just fetch it out of the MAC control regs. */ ++ else { ++ hi = tr32(MAC_ADDR_0_HIGH); ++ lo = tr32(MAC_ADDR_0_LOW); ++ ++ dev->dev_addr[5] = lo & 0xff; ++ dev->dev_addr[4] = (lo >> 8) & 0xff; ++ dev->dev_addr[3] = (lo >> 16) & 0xff; ++ dev->dev_addr[2] = (lo >> 24) & 0xff; ++ dev->dev_addr[1] = hi & 0xff; ++ dev->dev_addr[0] = (hi >> 8) & 0xff; ++ } ++ } ++ ++ if (!is_valid_ether_addr(&dev->dev_addr[0])) { ++#ifdef CONFIG_SPARC ++ if (!tg3_get_default_macaddr_sparc(tp)) ++ return 0; ++#endif ++ return -EINVAL; ++ } ++#ifdef ETHTOOL_GPERMADDR ++ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); ++#endif ++ return 0; ++} ++ ++#define BOUNDARY_SINGLE_CACHELINE 1 ++#define BOUNDARY_MULTI_CACHELINE 2 ++ ++static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) ++{ ++ int cacheline_size; ++ u8 byte; ++ int goal; ++ ++ pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); ++ if (byte == 0) ++ cacheline_size = 1024; ++ else ++ cacheline_size = (int) byte * 4; ++ ++ /* On 5703 and later chips, the boundary bits have no ++ * effect. ++ */ ++ if (tg3_asic_rev(tp) != ASIC_REV_5700 && ++ tg3_asic_rev(tp) != ASIC_REV_5701 && ++ !tg3_flag(tp, PCI_EXPRESS)) ++ goto out; ++ ++#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) ++ goal = BOUNDARY_MULTI_CACHELINE; ++#else ++#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) ++ goal = BOUNDARY_SINGLE_CACHELINE; ++#else ++ goal = 0; ++#endif ++#endif ++ ++ if (tg3_flag(tp, 57765_PLUS)) { ++ val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; ++ goto out; ++ } ++ ++ if (!goal) ++ goto out; ++ ++ /* PCI controllers on most RISC systems tend to disconnect ++ * when a device tries to burst across a cache-line boundary. ++ * Therefore, letting tg3 do so just wastes PCI bandwidth. ++ * ++ * Unfortunately, for PCI-E there are only limited ++ * write-side controls for this, and thus for reads ++ * we will still get the disconnects. We'll also waste ++ * these PCI cycles for both read and write for chips ++ * other than 5700 and 5701 which do not implement the ++ * boundary bits. 
++ */ ++ if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { ++ switch (cacheline_size) { ++ case 16: ++ case 32: ++ case 64: ++ case 128: ++ if (goal == BOUNDARY_SINGLE_CACHELINE) { ++ val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | ++ DMA_RWCTRL_WRITE_BNDRY_128_PCIX); ++ } else { ++ val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | ++ DMA_RWCTRL_WRITE_BNDRY_384_PCIX); ++ } ++ break; ++ ++ case 256: ++ val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | ++ DMA_RWCTRL_WRITE_BNDRY_256_PCIX); ++ break; ++ ++ default: ++ val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | ++ DMA_RWCTRL_WRITE_BNDRY_384_PCIX); ++ break; ++ } ++ } else if (tg3_flag(tp, PCI_EXPRESS)) { ++ switch (cacheline_size) { ++ case 16: ++ case 32: ++ case 64: ++ if (goal == BOUNDARY_SINGLE_CACHELINE) { ++ val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; ++ val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; ++ break; ++ } ++ /* fallthrough */ ++ case 128: ++ default: ++ val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; ++ val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; ++ break; ++ } ++ } else { ++ switch (cacheline_size) { ++ case 16: ++ if (goal == BOUNDARY_SINGLE_CACHELINE) { ++ val |= (DMA_RWCTRL_READ_BNDRY_16 | ++ DMA_RWCTRL_WRITE_BNDRY_16); ++ break; ++ } ++ /* fallthrough */ ++ case 32: ++ if (goal == BOUNDARY_SINGLE_CACHELINE) { ++ val |= (DMA_RWCTRL_READ_BNDRY_32 | ++ DMA_RWCTRL_WRITE_BNDRY_32); ++ break; ++ } ++ /* fallthrough */ ++ case 64: ++ if (goal == BOUNDARY_SINGLE_CACHELINE) { ++ val |= (DMA_RWCTRL_READ_BNDRY_64 | ++ DMA_RWCTRL_WRITE_BNDRY_64); ++ break; ++ } ++ /* fallthrough */ ++ case 128: ++ if (goal == BOUNDARY_SINGLE_CACHELINE) { ++ val |= (DMA_RWCTRL_READ_BNDRY_128 | ++ DMA_RWCTRL_WRITE_BNDRY_128); ++ break; ++ } ++ /* fallthrough */ ++ case 256: ++ val |= (DMA_RWCTRL_READ_BNDRY_256 | ++ DMA_RWCTRL_WRITE_BNDRY_256); ++ break; ++ case 512: ++ val |= (DMA_RWCTRL_READ_BNDRY_512 | ++ DMA_RWCTRL_WRITE_BNDRY_512); ++ break; ++ case 1024: ++ default: ++ val |= (DMA_RWCTRL_READ_BNDRY_1024 | ++ DMA_RWCTRL_WRITE_BNDRY_1024); ++ break; ++ } ++ } ++ ++out: ++ return val; ++} ++ ++static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, ++ int size, bool to_device) ++{ ++ struct tg3_internal_buffer_desc test_desc; ++ u32 sram_dma_descs; ++ int i, ret; ++ ++ sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; ++ ++ tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); ++ tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); ++ tw32(RDMAC_STATUS, 0); ++ tw32(WDMAC_STATUS, 0); ++ ++ tw32(BUFMGR_MODE, 0); ++ tw32(FTQ_RESET, 0); ++ ++ test_desc.addr_hi = ((u64) buf_dma) >> 32; ++ test_desc.addr_lo = buf_dma & 0xffffffff; ++ test_desc.nic_mbuf = 0x00002100; ++ test_desc.len = size; ++ ++ /* ++ * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz ++ * the *second* time the tg3 driver was getting loaded after an ++ * initial scan. ++ * ++ * Broadcom tells me: ++ * ...the DMA engine is connected to the GRC block and a DMA ++ * reset may affect the GRC block in some unpredictable way... ++ * The behavior of resets to individual blocks has not been tested. ++ * ++ * Broadcom noted the GRC reset will also reset all sub-components. 
++ */ ++ if (to_device) { ++ test_desc.cqid_sqid = (13 << 8) | 2; ++ ++ tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); ++ udelay(40); ++ } else { ++ test_desc.cqid_sqid = (16 << 8) | 7; ++ ++ tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); ++ udelay(40); ++ } ++ test_desc.flags = 0x00000005; ++ ++ for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { ++ u32 val; ++ ++ val = *(((u32 *)&test_desc) + i); ++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, ++ sram_dma_descs + (i * sizeof(u32))); ++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); ++ } ++ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); ++ ++ if (to_device) ++ tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); ++ else ++ tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); ++ ++ ret = -ENODEV; ++ for (i = 0; i < 40; i++) { ++ u32 val; ++ ++ if (to_device) ++ val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); ++ else ++ val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); ++ if ((val & 0xffff) == sram_dma_descs) { ++ ret = 0; ++ break; ++ } ++ ++ udelay(100); ++ } ++ ++ return ret; ++} ++ ++#define TEST_BUFFER_SIZE 0x2000 ++ ++#if (LINUX_VERSION_CODE >= 0x2060a) ++static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = { ++ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, ++ { }, ++}; ++#endif ++ ++static int __devinit tg3_test_dma(struct tg3 *tp) ++{ ++ dma_addr_t buf_dma; ++ u32 *buf, saved_dma_rwctrl; ++ int ret = 0; ++ ++ buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, ++ &buf_dma, GFP_KERNEL); ++ if (!buf) { ++ ret = -ENOMEM; ++ goto out_nofree; ++ } ++ ++ tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | ++ (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); ++ ++ tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); ++ ++ if (tg3_flag(tp, 57765_PLUS)) ++ goto out; ++ ++ if (tg3_flag(tp, PCI_EXPRESS)) { ++ /* DMA read watermark not used on PCIE */ ++ tp->dma_rwctrl |= 0x00180000; ++ } else if (!tg3_flag(tp, PCIX_MODE)) { ++ if (tg3_asic_rev(tp) == ASIC_REV_5705 || ++ tg3_asic_rev(tp) == ASIC_REV_5750) ++ tp->dma_rwctrl |= 0x003f0000; ++ else ++ tp->dma_rwctrl |= 0x003f000f; ++ } else { ++ if (tg3_asic_rev(tp) == ASIC_REV_5703 || ++ tg3_asic_rev(tp) == ASIC_REV_5704) { ++ u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); ++ u32 read_water = 0x7; ++ ++ /* If the 5704 is behind the EPB bridge, we can ++ * do the less restrictive ONE_DMA workaround for ++ * better performance. ++ */ ++ if (tg3_flag(tp, 40BIT_DMA_BUG) && ++ tg3_asic_rev(tp) == ASIC_REV_5704) ++ tp->dma_rwctrl |= 0x8000; ++ else if (ccval == 0x6 || ccval == 0x7) ++ tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5703) ++ read_water = 4; ++ /* Set bit 23 to enable PCIX hw bug fix */ ++ tp->dma_rwctrl |= ++ (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | ++ (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | ++ (1 << 23); ++ } else if (tg3_asic_rev(tp) == ASIC_REV_5780) { ++ /* 5780 always in PCIX mode */ ++ tp->dma_rwctrl |= 0x00144000; ++ } else if (tg3_asic_rev(tp) == ASIC_REV_5714) { ++ /* 5714 always in PCIX mode */ ++ tp->dma_rwctrl |= 0x00148000; ++ } else { ++ tp->dma_rwctrl |= 0x001b000f; ++ } ++ } ++ if (tg3_flag(tp, ONE_DMA_AT_ONCE)) ++ tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5703 || ++ tg3_asic_rev(tp) == ASIC_REV_5704) ++ tp->dma_rwctrl &= 0xfffffff0; ++ ++ if (tg3_asic_rev(tp) == ASIC_REV_5700 || ++ tg3_asic_rev(tp) == ASIC_REV_5701) { ++ /* Remove this if it causes problems for some boards. 
*/ ++ tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; ++ ++ /* On 5700/5701 chips, we need to set this bit. ++ * Otherwise the chip will issue cacheline transactions ++ * to streamable DMA memory with not all the byte ++ * enables turned on. This is an error on several ++ * RISC PCI controllers, in particular sparc64. ++ * ++ * On 5703/5704 chips, this bit has been reassigned ++ * a different meaning. In particular, it is used ++ * on those chips to enable a PCI-X workaround. ++ */ ++ tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; ++ } ++ ++ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); ++ ++#if 0 ++ /* Unneeded, already done by tg3_get_invariants. */ ++ tg3_switch_clocks(tp); ++#endif ++ ++ if (tg3_asic_rev(tp) != ASIC_REV_5700 && ++ tg3_asic_rev(tp) != ASIC_REV_5701) ++ goto out; ++ ++ /* It is best to perform DMA test with maximum write burst size ++ * to expose the 5700/5701 write DMA bug. ++ */ ++ saved_dma_rwctrl = tp->dma_rwctrl; ++ tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; ++ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); ++ ++ while (1) { ++ u32 *p = buf, i; ++ ++ for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) ++ p[i] = i; ++ ++ /* Send the buffer to the chip. */ ++ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true); ++ if (ret) { ++ dev_err(&tp->pdev->dev, ++ "%s: Buffer write failed. err = %d\n", ++ __func__, ret); ++ break; ++ } ++ ++#if 0 ++ /* validate data reached card RAM correctly. */ ++ for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { ++ u32 val; ++ tg3_read_mem(tp, 0x2100 + (i*4), &val); ++ if (le32_to_cpu(val) != p[i]) { ++ dev_err(&tp->pdev->dev, ++ "%s: Buffer corrupted on device! " ++ "(%d != %d)\n", __func__, val, i); ++ /* ret = -ENODEV here? */ ++ } ++ p[i] = 0; ++ } ++#endif ++ /* Now read it back. */ ++ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false); ++ if (ret) { ++ dev_err(&tp->pdev->dev, "%s: Buffer read failed. " ++ "err = %d\n", __func__, ret); ++ break; ++ } ++ ++ /* Verify it. */ ++ for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { ++ if (p[i] == i) ++ continue; ++ ++ if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != ++ DMA_RWCTRL_WRITE_BNDRY_16) { ++ tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; ++ tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; ++ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); ++ break; ++ } else { ++ dev_err(&tp->pdev->dev, ++ "%s: Buffer corrupted on read back! " ++ "(%d != %d)\n", __func__, p[i], i); ++ ret = -ENODEV; ++ goto out; ++ } ++ } ++ ++ if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { ++ /* Success. */ ++ ret = 0; ++ break; ++ } ++ } ++ if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != ++ DMA_RWCTRL_WRITE_BNDRY_16) { ++ /* DMA test passed without adjusting DMA boundary, ++ * now look for chipsets that are known to expose the ++ * DMA bug without failing the test. ++ */ ++#if (LINUX_VERSION_CODE < 0x2060a) ++ if (pci_find_device(PCI_VENDOR_ID_APPLE, ++ PCI_DEVICE_ID_APPLE_UNI_N_PCI15, NULL)) ++#else ++ if (pci_dev_present(tg3_dma_wait_state_chipsets)) ++#endif ++ { ++ tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; ++ tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; ++ } else { ++ /* Safe to use the calculated DMA boundary. 
*/ ++ tp->dma_rwctrl = saved_dma_rwctrl; ++ } ++ ++ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); ++ } ++ ++out: ++ dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); ++out_nofree: ++ return ret; ++} ++ ++static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) ++{ ++ if (tg3_flag(tp, 57765_PLUS)) { ++ tp->bufmgr_config.mbuf_read_dma_low_water = ++ DEFAULT_MB_RDMA_LOW_WATER_5705; ++ tp->bufmgr_config.mbuf_mac_rx_low_water = ++ DEFAULT_MB_MACRX_LOW_WATER_57765; ++ tp->bufmgr_config.mbuf_high_water = ++ DEFAULT_MB_HIGH_WATER_57765; ++ ++ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = ++ DEFAULT_MB_RDMA_LOW_WATER_5705; ++ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = ++ DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; ++ tp->bufmgr_config.mbuf_high_water_jumbo = ++ DEFAULT_MB_HIGH_WATER_JUMBO_57765; ++ } else if (tg3_flag(tp, 5705_PLUS)) { ++ tp->bufmgr_config.mbuf_read_dma_low_water = ++ DEFAULT_MB_RDMA_LOW_WATER_5705; ++ tp->bufmgr_config.mbuf_mac_rx_low_water = ++ DEFAULT_MB_MACRX_LOW_WATER_5705; ++ tp->bufmgr_config.mbuf_high_water = ++ DEFAULT_MB_HIGH_WATER_5705; ++ if (tg3_asic_rev(tp) == ASIC_REV_5906) { ++ tp->bufmgr_config.mbuf_mac_rx_low_water = ++ DEFAULT_MB_MACRX_LOW_WATER_5906; ++ tp->bufmgr_config.mbuf_high_water = ++ DEFAULT_MB_HIGH_WATER_5906; ++ } ++ ++ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = ++ DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; ++ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = ++ DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; ++ tp->bufmgr_config.mbuf_high_water_jumbo = ++ DEFAULT_MB_HIGH_WATER_JUMBO_5780; ++ } else { ++ tp->bufmgr_config.mbuf_read_dma_low_water = ++ DEFAULT_MB_RDMA_LOW_WATER; ++ tp->bufmgr_config.mbuf_mac_rx_low_water = ++ DEFAULT_MB_MACRX_LOW_WATER; ++ tp->bufmgr_config.mbuf_high_water = ++ DEFAULT_MB_HIGH_WATER; ++ ++ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = ++ DEFAULT_MB_RDMA_LOW_WATER_JUMBO; ++ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = ++ DEFAULT_MB_MACRX_LOW_WATER_JUMBO; ++ tp->bufmgr_config.mbuf_high_water_jumbo = ++ DEFAULT_MB_HIGH_WATER_JUMBO; ++ } ++ ++ tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; ++ tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; ++} ++ ++static char * __devinit tg3_phy_string(struct tg3 *tp) ++{ ++ switch (tp->phy_id & TG3_PHY_ID_MASK) { ++ case TG3_PHY_ID_BCM5400: return "5400"; ++ case TG3_PHY_ID_BCM5401: return "5401"; ++ case TG3_PHY_ID_BCM5411: return "5411"; ++ case TG3_PHY_ID_BCM5701: return "5701"; ++ case TG3_PHY_ID_BCM5703: return "5703"; ++ case TG3_PHY_ID_BCM5704: return "5704"; ++ case TG3_PHY_ID_BCM5705: return "5705"; ++ case TG3_PHY_ID_BCM5750: return "5750"; ++ case TG3_PHY_ID_BCM5752: return "5752"; ++ case TG3_PHY_ID_BCM5714: return "5714"; ++ case TG3_PHY_ID_BCM5780: return "5780"; ++ case TG3_PHY_ID_BCM5755: return "5755"; ++ case TG3_PHY_ID_BCM5787: return "5787"; ++ case TG3_PHY_ID_BCM5784: return "5784"; ++ case TG3_PHY_ID_BCM5756: return "5722/5756"; ++ case TG3_PHY_ID_BCM5906: return "5906"; ++ case TG3_PHY_ID_BCM5761: return "5761"; ++#ifndef BCM_INCLUDE_PHYLIB_SUPPORT ++ case TG3_PHY_ID_BCM50610: return "50610"; ++ case TG3_PHY_ID_BCM50610M: return "50610M"; ++ case TG3_PHY_ID_BCM50612E: return "50612E"; ++ case TG3_PHY_ID_BCMAC131: return "AC131"; ++ case TG3_PHY_ID_BCM57780: return "57780"; ++#endif ++ case TG3_PHY_ID_BCM5718C: return "5718C"; ++ case TG3_PHY_ID_BCM5718S: return "5718S"; ++ case TG3_PHY_ID_BCM57765: return "57765"; ++ case TG3_PHY_ID_BCM5719C: return "5719C"; ++ case TG3_PHY_ID_BCM5720C: return "5720C"; ++ case 
TG3_PHY_ID_BCM5762: return "5762C"; ++ case TG3_PHY_ID_BCM8002: return "8002/serdes"; ++ case 0: return "serdes"; ++ default: return "unknown"; ++ } ++} ++ ++static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) ++{ ++ if (tg3_flag(tp, PCI_EXPRESS)) { ++ strcpy(str, "PCI Express"); ++ return str; ++ } else if (tg3_flag(tp, PCIX_MODE)) { ++ u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; ++ ++ strcpy(str, "PCIX:"); ++ ++ if ((clock_ctrl == 7) || ++ ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == ++ GRC_MISC_CFG_BOARD_ID_5704CIOBE)) ++ strcat(str, "133MHz"); ++ else if (clock_ctrl == 0) ++ strcat(str, "33MHz"); ++ else if (clock_ctrl == 2) ++ strcat(str, "50MHz"); ++ else if (clock_ctrl == 4) ++ strcat(str, "66MHz"); ++ else if (clock_ctrl == 6) ++ strcat(str, "100MHz"); ++ } else { ++ strcpy(str, "PCI:"); ++ if (tg3_flag(tp, PCI_HIGH_SPEED)) ++ strcat(str, "66MHz"); ++ else ++ strcat(str, "33MHz"); ++ } ++ if (tg3_flag(tp, PCI_32BIT)) ++ strcat(str, ":32-bit"); ++ else ++ strcat(str, ":64-bit"); ++ return str; ++} ++ ++static void __devinit tg3_init_coal(struct tg3 *tp) ++{ ++ struct ethtool_coalesce *ec = &tp->coal; ++ ++ memset(ec, 0, sizeof(*ec)); ++ ec->cmd = ETHTOOL_GCOALESCE; ++ ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; ++ ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; ++ ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; ++ ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; ++ ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; ++ ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; ++ ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; ++ ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; ++ ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; ++ ++ if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | ++ HOSTCC_MODE_CLRTICK_TXBD)) { ++ ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; ++ ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; ++ ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; ++ ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; ++ } ++ ++ if (tg3_flag(tp, 5705_PLUS)) { ++ ec->rx_coalesce_usecs_irq = 0; ++ ec->tx_coalesce_usecs_irq = 0; ++ ec->stats_block_coalesce_usecs = 0; ++ } ++} ++ ++static int __devinit tg3_init_one(struct pci_dev *pdev, ++ const struct pci_device_id *ent) ++{ ++ struct net_device *dev; ++ struct tg3 *tp; ++ int i, err, pm_cap; ++ u32 sndmbx, rcvmbx, intmbx; ++ char str[40]; ++ u64 dma_mask, persist_dma_mask; ++ DECLARE_MAC_BUF(mac); ++ netdev_features_t features = 0; ++ ++ printk_once(KERN_INFO "%s\n", version); ++ ++ err = pci_enable_device(pdev); ++ if (err) { ++ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); ++ return err; ++ } ++ ++ err = pci_request_regions(pdev, DRV_MODULE_NAME); ++ if (err) { ++ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); ++ goto err_out_disable_pdev; ++ } ++ ++ pci_set_master(pdev); ++ ++ /* Find power-management capability. 
*/ ++ pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); ++ if (pm_cap == 0) { ++ dev_err(&pdev->dev, ++ "Cannot find Power Management capability, aborting\n"); ++ err = -EIO; ++ goto err_out_free_res; ++ } ++ ++ err = pci_set_power_state(pdev, PCI_D0); ++ if (err) { ++ dev_err(&pdev->dev, "Transition to D0 failed, aborting\n"); ++ goto err_out_free_res; ++ } ++ ++ dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); ++ if (!dev) { ++ dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n"); ++ err = -ENOMEM; ++ goto err_out_power_down; ++ } ++ ++ SET_MODULE_OWNER(dev); ++#if (LINUX_VERSION_CODE >= 0x20419) ++ SET_NETDEV_DEV(dev, &pdev->dev); ++#endif ++ ++ pci_set_drvdata(pdev, dev); ++ ++ tp = netdev_priv(dev); ++ tp->pdev = pdev; ++ tp->dev = dev; ++ tp->pm_cap = pm_cap; ++ tp->rx_mode = TG3_DEF_RX_MODE; ++ tp->tx_mode = TG3_DEF_TX_MODE; ++ tp->irq_sync = 1; ++ ++ if (tg3_debug > 0) ++ tp->msg_enable = tg3_debug; ++ else ++ tp->msg_enable = TG3_DEF_MSG_ENABLE; ++ ++ if (pdev_is_ssb_gige_core(pdev)) { ++ tg3_flag_set(tp, IS_SSB_CORE); ++ if (ssb_gige_must_flush_posted_writes(pdev)) ++ tg3_flag_set(tp, FLUSH_POSTED_WRITES); ++ if (ssb_gige_one_dma_at_once(pdev)) ++ tg3_flag_set(tp, ONE_DMA_AT_ONCE); ++ if (ssb_gige_have_roboswitch(pdev)) { ++ tg3_flag_set(tp, USE_PHYLIB); ++ tg3_flag_set(tp, ROBOSWITCH); ++ } ++ if (ssb_gige_is_rgmii(pdev)) ++ tg3_flag_set(tp, RGMII_MODE); ++ } ++ ++ /* The word/byte swap controls here control register access byte ++ * swapping. DMA data byte swapping is controlled in the GRC_MODE ++ * setting below. ++ */ ++ tp->misc_host_ctrl = ++ MISC_HOST_CTRL_MASK_PCI_INT | ++ MISC_HOST_CTRL_WORD_SWAP | ++ MISC_HOST_CTRL_INDIR_ACCESS | ++ MISC_HOST_CTRL_PCISTATE_RW; ++ ++ /* The NONFRM (non-frame) byte/word swap controls take effect ++ * on descriptor entries, anything which isn't packet data. ++ * ++ * The StrongARM chips on the board (one for tx, one for rx) ++ * are running in big-endian mode. 
++ */ ++ tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | ++ GRC_MODE_WSWAP_NONFRM_DATA); ++#ifdef __BIG_ENDIAN ++ tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; ++#endif ++ spin_lock_init(&tp->lock); ++ spin_lock_init(&tp->indirect_lock); ++#ifdef BCM_HAS_NEW_INIT_WORK ++ INIT_WORK(&tp->reset_task, tg3_reset_task); ++#else ++ INIT_WORK(&tp->reset_task, tg3_reset_task, tp); ++#endif ++ ++ tp->regs = pci_ioremap_bar(pdev, BAR_0); ++ if (!tp->regs) { ++ dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); ++ err = -ENOMEM; ++ goto err_out_free_dev; ++ } ++ ++ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || ++ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || ++ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) { ++ tg3_flag_set(tp, ENABLE_APE); ++ tp->aperegs = pci_ioremap_bar(pdev, BAR_2); ++ if (!tp->aperegs) { ++ dev_err(&pdev->dev, ++ "Cannot map APE registers, aborting\n"); ++ err = -ENOMEM; ++ goto err_out_iounmap; ++ } ++ } ++ ++ tp->rx_pending = TG3_DEF_RX_RING_PENDING; ++ tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; ++ ++ dev->ethtool_ops = &tg3_ethtool_ops; ++#ifdef GET_ETHTOOL_OP_EXT ++ set_ethtool_ops_ext(dev, &tg3_ethtool_ops_ext); ++#endif ++ ++#ifdef GET_NETDEV_OP_EXT ++ set_netdev_ops_ext(dev, &tg3_net_device_ops_ext); ++#endif ++ ++ dev->watchdog_timeo = TG3_TX_TIMEOUT; ++ dev->irq = pdev->irq; ++ ++ err = tg3_get_invariants(tp, ent); ++ if (err) { ++ dev_err(&pdev->dev, ++ "Problem fetching invariants of chip, aborting\n"); ++ goto err_out_apeunmap; ++ } ++ ++#ifdef BCM_HAS_NET_DEVICE_OPS ++ dev->netdev_ops = &tg3_netdev_ops; ++#else ++ dev->open = tg3_open; ++ dev->stop = tg3_close; ++ dev->get_stats = tg3_get_stats; ++ dev->set_multicast_list = tg3_set_rx_mode; ++ dev->set_mac_address = tg3_set_mac_addr; ++ dev->do_ioctl = tg3_ioctl; ++ dev->tx_timeout = tg3_tx_timeout; ++ dev->change_mtu = tg3_change_mtu; ++#ifndef BCM_HAS_NEW_VLAN_INTERFACE ++ dev->vlan_rx_register = tg3_vlan_rx_register; ++ dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid; ++#endif ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ dev->poll_controller = tg3_poll_controller; ++#endif ++ ++ tp->dev->hard_start_xmit = tg3_start_xmit; ++#endif ++ ++ /* The EPB bridge inside 5714, 5715, and 5780 and any ++ * device behind the EPB cannot support DMA addresses > 40-bit. ++ * On 64-bit systems with IOMMU, use 40-bit dma_mask. ++ * On 64-bit systems without IOMMU, use 64-bit dma_mask and ++ * do DMA address check in tg3_start_xmit(). ++ */ ++ if (tg3_flag(tp, IS_5788)) ++ persist_dma_mask = dma_mask = DMA_BIT_MASK(32); ++ else if (tg3_flag(tp, 40BIT_DMA_BUG)) { ++ persist_dma_mask = dma_mask = DMA_BIT_MASK(40); ++#ifdef CONFIG_HIGHMEM ++ dma_mask = DMA_BIT_MASK(64); ++#endif ++ } else ++ persist_dma_mask = dma_mask = DMA_BIT_MASK(64); ++ ++ /* Configure DMA attributes. 
*/ ++ if (dma_mask > DMA_BIT_MASK(32)) { ++ err = pci_set_dma_mask(pdev, dma_mask); ++ if (!err) { ++ features |= NETIF_F_HIGHDMA; ++ err = pci_set_consistent_dma_mask(pdev, ++ persist_dma_mask); ++ if (err < 0) { ++ dev_err(&pdev->dev, "Unable to obtain " ++ "DMA for consistent allocations\n"); ++ goto err_out_apeunmap; ++ } ++ } ++ } ++ if (err || dma_mask == DMA_BIT_MASK(32)) { ++ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); ++ if (err) { ++ dev_err(&pdev->dev, ++ "No usable DMA configuration, aborting\n"); ++ goto err_out_apeunmap; ++ } ++ } ++ ++ tg3_init_bufmgr_config(tp); ++ ++ /* 5700 B0 chips do not support checksumming correctly due ++ * to hardware bugs. ++ */ ++ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) { ++ features |= NETIF_F_SG | NETIF_F_GRO | NETIF_F_RXCSUM; ++ ++#ifndef BCM_NO_IPV6_CSUM ++ features |= NETIF_F_IP_CSUM; ++ if (tg3_flag(tp, 5755_PLUS)) ++ features |= NETIF_F_IPV6_CSUM; ++#else ++ if (tg3_flag(tp, 5755_PLUS)) ++ features |= NETIF_F_HW_CSUM; ++ else ++ features |= NETIF_F_IP_CSUM; ++#endif ++ } ++ ++#if TG3_TSO_SUPPORT != 0 ++ /* TSO is on by default on chips that support hardware TSO. ++ * Firmware TSO on older chips gives lower performance, so it ++ * is off by default, but can be enabled using ethtool. ++ */ ++ if ((tg3_flag(tp, HW_TSO_1) || ++ tg3_flag(tp, HW_TSO_2) || ++ tg3_flag(tp, HW_TSO_3)) && ++ (features & (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM))) ++ features |= NETIF_F_TSO; ++ if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) { ++ if (features & NETIF_F_IPV6_CSUM) ++ features |= NETIF_F_TSO6; ++ if (tg3_flag(tp, HW_TSO_3) || ++ tg3_asic_rev(tp) == ASIC_REV_5761 || ++ (tg3_asic_rev(tp) == ASIC_REV_5784 && ++ tg3_chip_rev(tp) != CHIPREV_5784_AX) || ++ tg3_asic_rev(tp) == ASIC_REV_5785 || ++ tg3_asic_rev(tp) == ASIC_REV_57780) ++ features |= NETIF_F_TSO_ECN; ++ } ++ ++#if defined(__VMKLNX__) ++ features = tg3_vmware_tune_tso(tp, features); ++#endif /* __VMKLNX__ */ ++#endif /* TG3_TSO_SUPPORT != 0 */ ++ ++ dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX | ++ NETIF_F_HW_VLAN_CTAG_RX; ++ dev->vlan_features |= features; ++ ++#ifdef BCM_HAS_FIX_FEATURES ++ /* ++ * Add loopback capability only for a subset of devices that support ++ * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY ++ * loopback for the remaining devices. 
++	 */
++	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
++	    !tg3_flag(tp, CPMU_PRESENT))
++		/* Add the loopback capability */
++		features |= NETIF_F_LOOPBACK;
++#endif
++
++#if defined(GET_NETDEV_OP_EXT)
++	set_netdev_hw_features(dev, get_netdev_hw_features(dev) | features);
++#else
++	dev->hw_features |= features;
++#endif
++
++#ifdef IFF_UNICAST_FLT
++	dev->priv_flags |= IFF_UNICAST_FLT;
++#endif
++
++	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
++	    !tg3_flag(tp, TSO_CAPABLE) &&
++	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
++		tg3_flag_set(tp, MAX_RXPEND_64);
++		tp->rx_pending = 63;
++	}
++
++	err = tg3_get_device_address(tp);
++	if (err) {
++		dev_err(&pdev->dev,
++			"Could not obtain valid ethernet address, aborting\n");
++		goto err_out_apeunmap;
++	}
++
++	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
++	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
++	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
++	for (i = 0; i < tp->irq_max; i++) {
++		struct tg3_napi *tnapi = &tp->napi[i];
++
++		tnapi->tp = tp;
++		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
++
++		tnapi->int_mbox = intmbx;
++		if (i <= 4)
++			intmbx += 0x8;
++		else {
++			if (intmbx & 0x4)
++				intmbx -= 0x4;
++			else
++				intmbx += 0xc;
++		}
++
++		tnapi->consmbox = rcvmbx;
++		tnapi->prodmbox = sndmbx;
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++		tg3_setup_prod_mboxes(tp, i);
++#endif
++
++		if (i)
++			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
++		else
++			tnapi->coal_now = HOSTCC_MODE_NOW;
++
++		if (!tg3_flag(tp, SUPPORT_MSIX))
++			break;
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++		/*
++		 * If we support NETQ, the first interrupt vector is the default
++		 * rx queue. The first four queues follow the legacy RSS mailbox
++		 * enumeration scheme. The remaining queues then follow the
++		 * quirky new enumeration scheme.
++		 */
++		if (tg3_flag(tp, IOV_CAPABLE)) {
++			if (i > 3) {
++				if (rcvmbx & 0x4)
++					rcvmbx -= 0x4;
++				else
++					rcvmbx += 0xc;
++			} else
++				rcvmbx += 0x8;
++		}
++
++		if (!i)
++			continue;
++
++		if (!tg3_flag(tp, IOV_CAPABLE))
++			rcvmbx += 0x8;
++#else
++		/*
++		 * If we support MSIX, we'll be using RSS. If we're using
++		 * RSS, the first vector only handles link interrupts and the
++		 * remaining vectors handle rx and tx interrupts. Reuse the
++		 * mailbox values for the next iteration. The values we set up
++		 * above are still useful for the single vectored mode.
++		 */
++		if (!i)
++			continue;
++
++		rcvmbx += 0x8;
++#endif
++
++		if (sndmbx & 0x4)
++			sndmbx -= 0x4;
++		else
++			sndmbx += 0xc;
++	}
++
++	/*
++	 * Reset chip in case UNDI or EFI driver did not shutdown.
++	 * DMA self test will enable WDMAC and we'll see (spurious)
++	 * pending DMA on the PCI bus at that point.
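++	 * (HOSTCC_MODE_ENABLE or WDMAC_MODE_ENABLE still being set here
++	 * means the boot-time driver left the chip running.)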
++	 */
++	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
++	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
++		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
++		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++	}
++
++	err = tg3_test_dma(tp);
++	if (err) {
++		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
++		goto err_out_apeunmap;
++	}
++
++	tg3_init_coal(tp);
++
++	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
++	    (tg3_asic_rev(tp) == ASIC_REV_5720 &&
++	     tp->pdev->device != TG3PCI_DEVICE_TIGON3_5717_C) ||
++	    tg3_asic_rev(tp) == ASIC_REV_5762)
++		tg3_flag_set(tp, PTP_CAPABLE);
++
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	if (tg3_flag(tp, IOV_CAPABLE))
++		tg3_netq_init(tp);
++#endif
++
++	tg3_timer_init(tp);
++
++	err = register_netdev(dev);
++	if (err) {
++		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
++		goto err_out_apeunmap;
++	}
++
++	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %s\n",
++		    tp->board_part_number,
++		    tg3_chip_rev_id(tp),
++		    tg3_bus_string(tp, str),
++		    print_mac(mac, dev->dev_addr));
++
++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT
++	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
++		struct phy_device *phydev;
++		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
++		netdev_info(dev,
++			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
++			    phydev->drv->name, dev_name(&phydev->dev));
++	} else
++#endif
++	{
++		char *ethtype;
++
++		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
++			ethtype = "10/100Base-TX";
++		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
++			ethtype = "1000Base-SX";
++		else
++			ethtype = "10/100/1000Base-T";
++
++		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
++			    "(WireSpeed[%d], EEE[%d])\n",
++			    tg3_phy_string(tp), ethtype,
++			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
++			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
++	}
++
++	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
++		    (dev->features & NETIF_F_RXCSUM) != 0,
++		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
++		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
++		    tg3_flag(tp, ENABLE_ASF) != 0,
++		    tg3_flag(tp, TSO_CAPABLE) != 0);
++	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
++		    tp->dma_rwctrl,
++		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
++		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ?
40 : 64);
++
++#if defined(__VMKLNX__)
++	netdev_info(dev, "Jumbo Frames capable[%d]\n",
++		    tg3_flag(tp, JUMBO_CAPABLE) != 0);
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	if (tg3_flag(tp, IOV_CAPABLE))
++		netdev_info(dev, "NetQueue module parameter index [%d]\n",
++			    tp->vmware.netq.index);
++#endif
++#endif
++
++#ifdef BCM_HAS_PCI_EEH_SUPPORT
++	pci_save_state(pdev);
++#endif
++
++
++#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION >= 55000)
++	if (!disable_fw_dmp) {
++		static int nic_idx;
++
++		/* sanity check the NIC index */
++		if (nic_idx >= TG3_MAX_NIC) {
++			dev_err(&pdev->dev,
++				"Invalid number of dev(%d)\n",
++				nic_idx);
++			return -EINVAL;
++		}
++		tp->nic_idx = nic_idx;
++		/* allow fw dmp for newer chip only */
++		if (tg3_asic_rev(tp) > ASIC_REV_5906)
++			fwdmp_tp_ptr[tp->nic_idx] = tp;
++		else
++			netdev_info(dev, "No FW dump support in legacy chip\n");
++		nic_idx++;
++	}
++#endif /*defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION >= 55000) */
++	return 0;
++
++err_out_apeunmap:
++	if (tp->aperegs) {
++		iounmap(tp->aperegs);
++		tp->aperegs = NULL;
++	}
++
++err_out_iounmap:
++	if (tp->regs) {
++		iounmap(tp->regs);
++		tp->regs = NULL;
++	}
++
++err_out_free_dev:
++#if (LINUX_VERSION_CODE >= 0x20418)
++	free_netdev(dev);
++#else
++	kfree(dev);
++#endif
++
++err_out_power_down:
++	pci_set_power_state(pdev, PCI_D3hot);
++
++err_out_free_res:
++	pci_release_regions(pdev);
++
++err_out_disable_pdev:
++	if (pci_is_enabled(pdev))
++		pci_disable_device(pdev);
++	pci_set_drvdata(pdev, NULL);
++	return err;
++}
++
++static void __devexit tg3_remove_one(struct pci_dev *pdev)
++{
++	struct net_device *dev = pci_get_drvdata(pdev);
++
++	if (dev) {
++		struct tg3 *tp = netdev_priv(dev);
++
++		if (tp->fw)
++			tg3_priv_release_firmware(tp->fw);
++
++		tg3_reset_task_cancel(tp);
++
++		if (tg3_flag(tp, USE_PHYLIB)) {
++			tg3_phy_fini(tp);
++			tg3_mdio_fini(tp);
++		}
++
++		unregister_netdev(dev);
++
++		if (tp->aperegs) {
++			iounmap(tp->aperegs);
++			tp->aperegs = NULL;
++		}
++		if (tp->regs) {
++			iounmap(tp->regs);
++			tp->regs = NULL;
++		}
++#if (LINUX_VERSION_CODE >= 0x20418)
++		free_netdev(dev);
++#else
++		kfree(dev);
++#endif
++		pci_release_regions(pdev);
++		pci_disable_device(pdev);
++		pci_set_drvdata(pdev, NULL);
++	}
++}
++
++#undef SIMPLE_DEV_PM_OPS
++#ifdef SIMPLE_DEV_PM_OPS
++static int tg3_suspend(struct device *device)
++#else
++static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
++#endif
++{
++#ifdef SIMPLE_DEV_PM_OPS
++	struct pci_dev *pdev = to_pci_dev(device);
++#endif
++	struct net_device *dev = pci_get_drvdata(pdev);
++	struct tg3 *tp = netdev_priv(dev);
++	int err = 0;
++
++	if (tg3_invalid_pci_state(tp, state))
++		return -EINVAL;
++
++	tg3_pci_save_state(tp);
++
++	rtnl_lock();
++
++	if (!netif_running(dev))
++		goto power_down;
++
++	tg3_reset_task_cancel(tp);
++	tg3_phy_stop(tp);
++	tg3_netif_stop(tp);
++
++	tg3_timer_stop(tp);
++
++	tg3_full_lock(tp, 1);
++	tg3_disable_ints(tp);
++	tg3_full_unlock(tp);
++
++	netif_device_detach(dev);
++
++	tg3_full_lock(tp, 0);
++	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++	tg3_flag_clear(tp, INIT_COMPLETE);
++	tg3_full_unlock(tp);
++
++	err = tg3_power_down_prepare(tp);
++	if (err) {
++		int err2;
++
++		tg3_full_lock(tp, 0);
++
++		tg3_flag_set(tp, INIT_COMPLETE);
++		err2 = tg3_restart_hw(tp, true);
++		if (err2)
++			goto out;
++
++		tg3_timer_start(tp);
++
++		netif_device_attach(dev);
++		tg3_netif_start(tp);
++
++out:
++		tg3_full_unlock(tp);
++
++		if (!err2)
++			tg3_phy_start(tp);
++	}
++
++power_down:
++#ifndef SIMPLE_DEV_PM_OPS
++	if (!err)
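++		/* Legacy PM path (no dev_pm_ops): complete the
++		 * power-down here. */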
++ tg3_power_down(tp); ++#endif ++ ++ rtnl_unlock(); ++ return err; ++} ++ ++#ifdef SIMPLE_DEV_PM_OPS ++static int tg3_resume(struct device *device) ++#else ++static int tg3_resume(struct pci_dev *pdev) ++#endif ++{ ++#ifdef SIMPLE_DEV_PM_OPS ++ struct pci_dev *pdev = to_pci_dev(device); ++#endif ++ struct net_device *dev = pci_get_drvdata(pdev); ++ struct tg3 *tp = netdev_priv(dev); ++ int err = 0; ++ ++ tg3_pci_restore_state(tp); ++ ++ rtnl_lock(); ++ ++ if (!netif_running(dev)) ++ goto unlock; ++ ++ err = tg3_power_up(tp); ++ if (err) ++ goto unlock; ++ ++ tg3_5780_class_intx_workaround(tp); ++ ++ netif_device_attach(dev); ++ ++ tg3_ape_driver_state_change(tp, RESET_KIND_INIT); ++ tg3_full_lock(tp, 0); ++ ++ tg3_flag_set(tp, INIT_COMPLETE); ++ err = tg3_restart_hw(tp, ++ !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)); ++ if (err) ++ goto out; ++ ++ tg3_timer_start(tp); ++ ++ tg3_netif_start(tp); ++ ++out: ++ tg3_full_unlock(tp); ++ ++ if (!err) ++ tg3_phy_start(tp); ++ ++unlock: ++ rtnl_unlock(); ++ return err; ++} ++#ifdef BCM_HAS_PCI_PMOPS_SHUTDOWN ++#ifdef SIMPLE_DEV_PM_OPS ++static void tg3_shutdown(struct device *device) ++#else ++static void tg3_shutdown(struct pci_dev *pdev) ++#endif ++{ ++#ifdef SIMPLE_DEV_PM_OPS ++ struct pci_dev *pdev = to_pci_dev(device); ++#endif ++ struct net_device *dev = pci_get_drvdata(pdev); ++ struct tg3 *tp = netdev_priv(dev); ++ ++ rtnl_lock(); ++ netif_device_detach(dev); ++ ++ if (netif_running(dev)) ++#ifdef __VMKLNX__ /* ! BNX2X_UPSTREAM */ ++ if (dev->flags & IFF_UP) ++#endif ++ dev_close(dev); ++ ++ if (system_state == SYSTEM_POWER_OFF) ++ tg3_power_down(tp); ++ ++ rtnl_unlock(); ++} ++#endif /*BCM_HAS_PCI_PMOPS_SHUTDOWN*/ ++ ++#ifdef SIMPLE_DEV_PM_OPS ++#ifdef CONFIG_PM_SLEEP ++static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); ++#define TG3_PM_OPS (&tg3_pm_ops) ++ ++#else ++ ++#define TG3_PM_OPS NULL ++ ++#endif /* CONFIG_PM_SLEEP */ ++#endif ++ ++#ifdef BCM_HAS_PCI_EEH_SUPPORT ++/** ++ * tg3_io_error_detected - called when PCI error is detected ++ * @pdev: Pointer to PCI device ++ * @state: The current pci connection state ++ * ++ * This function is called after a PCI bus error affecting ++ * this device has been detected. ++ */ ++static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, ++ pci_channel_state_t state) ++{ ++ struct net_device *netdev = pci_get_drvdata(pdev); ++ struct tg3 *tp = netdev_priv(netdev); ++ pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET; ++ ++ netdev_info(netdev, "PCI I/O error detected\n"); ++ ++ rtnl_lock(); ++ ++ /* We probably don't have netdev yet */ ++ if (!netdev || !netif_running(netdev)) ++ goto done; ++ ++ tg3_phy_stop(tp); ++ ++ tg3_netif_stop(tp); ++ ++ tg3_timer_stop(tp); ++ ++ /* Want to make sure that the reset task doesn't run */ ++ tg3_reset_task_cancel(tp); ++ ++ netif_device_detach(netdev); ++ ++ /* Clean up software state, even if MMIO is blocked */ ++ tg3_full_lock(tp, 0); ++ tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); ++ tg3_full_unlock(tp); ++ ++done: ++ if (state == pci_channel_io_perm_failure) { ++ if (netdev) { ++ tg3_napi_enable(tp); ++ dev_close(netdev); ++ } ++ err = PCI_ERS_RESULT_DISCONNECT; ++ } else { ++ pci_disable_device(pdev); ++ } ++ ++ rtnl_unlock(); ++ ++ return err; ++} ++ ++/** ++ * tg3_io_slot_reset - called after the pci bus has been reset. ++ * @pdev: Pointer to PCI device ++ * ++ * Restart the card from scratch, as if from a cold-boot. 
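++ * (Invoked by the PCI error-recovery core once the bus/slot reset
++ * has completed.)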
++ * At this point, the card has experienced a hard reset,
++ * followed by fixups by BIOS, and has its config space
++ * set up identically to what it was at cold boot.
++ */
++static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
++{
++	struct net_device *netdev = pci_get_drvdata(pdev);
++	struct tg3 *tp = netdev_priv(netdev);
++	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
++	int err;
++
++	rtnl_lock();
++
++	if (pci_enable_device(pdev)) {
++		dev_err(&pdev->dev,
++			"Cannot re-enable PCI device after reset.\n");
++		goto done;
++	}
++
++	pci_set_master(pdev);
++	pci_restore_state(pdev);
++	pci_save_state(pdev);
++
++	if (!netdev || !netif_running(netdev)) {
++		rc = PCI_ERS_RESULT_RECOVERED;
++		goto done;
++	}
++
++	err = tg3_power_up(tp);
++	if (err)
++		goto done;
++
++	rc = PCI_ERS_RESULT_RECOVERED;
++
++done:
++	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
++		tg3_napi_enable(tp);
++		dev_close(netdev);
++	}
++	rtnl_unlock();
++
++	return rc;
++}
++
++/**
++ * tg3_io_resume - called when traffic can start flowing again.
++ * @pdev: Pointer to PCI device
++ *
++ * This callback is called when the error recovery driver tells
++ * us that it's OK to resume normal operation.
++ */
++static void tg3_io_resume(struct pci_dev *pdev)
++{
++	struct net_device *netdev = pci_get_drvdata(pdev);
++	struct tg3 *tp = netdev_priv(netdev);
++	int err;
++
++	rtnl_lock();
++
++	if (!netif_running(netdev))
++		goto done;
++
++	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
++	tg3_full_lock(tp, 0);
++	tg3_flag_set(tp, INIT_COMPLETE);
++	err = tg3_restart_hw(tp, true);
++	if (err) {
++		tg3_full_unlock(tp);
++		netdev_err(netdev, "Cannot restart hardware after reset.\n");
++		goto done;
++	}
++
++	netif_device_attach(netdev);
++
++	tg3_timer_start(tp);
++
++	tg3_netif_start(tp);
++
++	tg3_full_unlock(tp);
++
++	tg3_phy_start(tp);
++
++done:
++	rtnl_unlock();
++}
++
++static struct pci_error_handlers tg3_err_handler = {
++	.error_detected	= tg3_io_error_detected,
++	.slot_reset	= tg3_io_slot_reset,
++	.resume		= tg3_io_resume
++};
++#endif /* BCM_HAS_PCI_EEH_SUPPORT */
++
++static struct pci_driver tg3_driver = {
++	.name		= DRV_MODULE_NAME,
++	.id_table	= tg3_pci_tbl,
++	.probe		= tg3_init_one,
++	.remove		= __devexit_p(tg3_remove_one),
++#ifdef BCM_HAS_PCI_EEH_SUPPORT
++	.err_handler	= &tg3_err_handler,
++#endif
++#ifdef SIMPLE_DEV_PM_OPS
++	.driver.pm	= TG3_PM_OPS,
++#else
++	.suspend	= tg3_suspend,
++	.resume		= tg3_resume,
++#endif
++#ifdef BCM_HAS_PCI_PMOPS_SHUTDOWN
++	.shutdown	= tg3_shutdown,
++#endif
++};
++
++static int __init tg3_init(void)
++{
++#ifdef TG3_VMWARE_NETQ_ENABLE
++	int i;
++	for (i = 0; i < TG3_MAX_NIC; i++) {
++		if (tg3_netq_force[i] < TG3_OPTION_UNSET ||
++		    tg3_netq_force[i] >= TG3_IRQ_MAX_VECS_IOV) {
++			pr_err("tg3: Invalid force_netq module parameter "
++			       "value for index %d (%d)\n",
++			       i, tg3_netq_force[i]);
++			return -EINVAL;
++		}
++	}
++#endif
++#if (LINUX_VERSION_CODE < 0x020613) && !defined (__VMKLNX__)
++	return pci_module_init(&tg3_driver);
++#else
++#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION >= 55000)
++	if (!disable_fw_dmp) {
++		VMK_ReturnStatus status;
++		tg3_fwdmp_va_ptr = kzalloc(TG3_FWDMP_SIZE, GFP_KERNEL);
++
++		if (!tg3_fwdmp_va_ptr)
++			pr_err("tg3: Unable to allocate memory "
++			       "for fw dump handler!\n");
++		status = vmklnx_dump_add_callback(TG3_DUMPNAME,
++						  tg3_fwdmp_callback,
++						  NULL,
++						  TG3_DUMPNAME,
++						  &tg3_fwdmp_dh);
++		if (status != VMK_OK)
++			pr_err("tg3: Unable to register fw "
++ "dump handler (rc = 0x%x!)\n", status); ++ } ++#endif /*defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION >= 55000) */ ++ ++ return pci_register_driver(&tg3_driver); ++#endif ++} ++ ++static void __exit tg3_cleanup(void) ++{ ++#if (defined(__VMKLNX__) && VMWARE_ESX_DDK_VERSION >= 55000) ++ if (tg3_fwdmp_dh) { ++ VMK_ReturnStatus status = ++ vmklnx_dump_delete_callback(tg3_fwdmp_dh); ++ if (status != VMK_OK) ++ VMK_ASSERT(0); ++ } ++ kfree(tg3_fwdmp_va_ptr); ++ tg3_fwdmp_va_ptr = NULL; ++#endif /* defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION >= 55000) */ ++ pci_unregister_driver(&tg3_driver); ++} ++ ++#if defined(__VMKLNX__) ++#include "tg3_vmware.c" ++#endif ++ ++module_init(tg3_init); ++module_exit(tg3_cleanup); +diff --git a/drivers/net/ethernet/broadcom/tg3/tg3.h b/drivers/net/ethernet/broadcom/tg3/tg3.h +new file mode 100644 +index 0000000..0dd1a61 +--- /dev/null ++++ b/drivers/net/ethernet/broadcom/tg3/tg3.h +@@ -0,0 +1,3596 @@ ++/* $Id$ ++ * tg3.h: Definitions for Broadcom Tigon3 ethernet driver. ++ * ++ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) ++ * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) ++ * Copyright (C) 2004 Sun Microsystems Inc. ++ * Copyright (C) 2007-2015 Broadcom Corporation. ++ */ ++ ++#ifndef _T3_H ++#define _T3_H ++ ++#include "tg3_compat.h" ++ ++#define TG3_64BIT_REG_HIGH 0x00UL ++#define TG3_64BIT_REG_LOW 0x04UL ++ ++/* Descriptor block info. */ ++#define TG3_BDINFO_HOST_ADDR 0x0UL /* 64-bit */ ++#define TG3_BDINFO_MAXLEN_FLAGS 0x8UL /* 32-bit */ ++#define BDINFO_FLAGS_USE_EXT_RECV 0x00000001 /* ext rx_buffer_desc */ ++#define BDINFO_FLAGS_DISABLED 0x00000002 ++#define BDINFO_FLAGS_MAXLEN_MASK 0xffff0000 ++#define BDINFO_FLAGS_MAXLEN_SHIFT 16 ++#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */ ++#define TG3_BDINFO_SIZE 0x10UL ++ ++#define TG3_RX_STD_MAX_SIZE_5700 512 ++#define TG3_RX_STD_MAX_SIZE_5717 2048 ++#define TG3_RX_JMB_MAX_SIZE_5700 256 ++#define TG3_RX_JMB_MAX_SIZE_5717 1024 ++#define TG3_RX_RET_MAX_SIZE_5700 1024 ++#define TG3_RX_RET_MAX_SIZE_5705 512 ++#define TG3_RX_RET_MAX_SIZE_5717 4096 ++ ++#define TG3_RSS_INDIR_TBL_SIZE 128 ++ ++/* First 256 bytes are a mirror of PCI config space. 
*/ ++#define TG3PCI_VENDOR 0x00000000 ++#define TG3PCI_VENDOR_BROADCOM 0x14e4 ++#define TG3PCI_DEVICE 0x00000002 ++#define TG3PCI_DEVICE_TIGON3_1 0x1644 /* BCM5700 */ ++#define TG3PCI_DEVICE_TIGON3_2 0x1645 /* BCM5701 */ ++#define TG3PCI_DEVICE_TIGON3_3 0x1646 /* BCM5702 */ ++#define TG3PCI_DEVICE_TIGON3_4 0x1647 /* BCM5703 */ ++#define TG3PCI_DEVICE_TIGON3_5761S 0x1688 ++#define TG3PCI_DEVICE_TIGON3_5761SE 0x1689 ++#define TG3PCI_DEVICE_TIGON3_57780 0x1692 ++#define TG3PCI_DEVICE_TIGON3_5787M 0x1693 ++#define TG3PCI_DEVICE_TIGON3_57760 0x1690 ++#define TG3PCI_DEVICE_TIGON3_57790 0x1694 ++#define TG3PCI_DEVICE_TIGON3_57788 0x1691 ++#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */ ++#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */ ++#define TG3PCI_DEVICE_TIGON3_5717 0x1655 ++#define TG3PCI_DEVICE_TIGON3_5717_C 0x1665 ++#define TG3PCI_DEVICE_TIGON3_5718 0x1656 ++#define TG3PCI_DEVICE_TIGON3_57781 0x16b1 ++#define TG3PCI_DEVICE_TIGON3_57785 0x16b5 ++#define TG3PCI_DEVICE_TIGON3_57761 0x16b0 ++#define TG3PCI_DEVICE_TIGON3_57765 0x16b4 ++#define TG3PCI_DEVICE_TIGON3_57791 0x16b2 ++#define TG3PCI_DEVICE_TIGON3_57795 0x16b6 ++#define TG3PCI_DEVICE_TIGON3_5719 0x1657 ++#define TG3PCI_DEVICE_TIGON3_5720 0x165f ++#define TG3PCI_DEVICE_TIGON3_57762 0x1682 ++#define TG3PCI_DEVICE_TIGON3_57766 0x1686 ++#define TG3PCI_DEVICE_TIGON3_57786 0x16b3 ++#define TG3PCI_DEVICE_TIGON3_57782 0x16b7 ++#define TG3PCI_DEVICE_TIGON3_5762 0x1687 ++#define TG3PCI_DEVICE_TIGON3_5725 0x1643 ++#define TG3PCI_DEVICE_TIGON3_5727 0x16f3 ++#define TG3PCI_DEVICE_TIGON3_57764 0x1642 ++#define TG3PCI_DEVICE_TIGON3_57767 0x1683 ++#define TG3PCI_DEVICE_TIGON3_57787 0x1641 ++/* 0x04 --> 0x2c unused */ ++#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM ++#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 ++#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5 0x0001 ++#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6 0x0002 ++#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9 0x0003 ++#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1 0x0005 ++#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8 0x0006 ++#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7 0x0007 ++#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10 0x0008 ++#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12 0x8008 ++#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1 0x0009 ++#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2 0x8009 ++#define TG3PCI_SUBVENDOR_ID_3COM PCI_VENDOR_ID_3COM ++#define TG3PCI_SUBDEVICE_ID_3COM_3C996T 0x1000 ++#define TG3PCI_SUBDEVICE_ID_3COM_3C996BT 0x1006 ++#define TG3PCI_SUBDEVICE_ID_3COM_3C996SX 0x1004 ++#define TG3PCI_SUBDEVICE_ID_3COM_3C1000T 0x1007 ++#define TG3PCI_SUBDEVICE_ID_3COM_3C940BR01 0x1008 ++#define TG3PCI_SUBVENDOR_ID_DELL PCI_VENDOR_ID_DELL ++#define TG3PCI_SUBDEVICE_ID_DELL_VIPER 0x00d1 ++#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106 ++#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109 ++#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a ++#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ ++#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c ++#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a ++#define TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING 0x007d ++#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780 0x0085 ++#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2 0x0099 ++#define TG3PCI_SUBVENDOR_ID_IBM PCI_VENDOR_ID_IBM ++#define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2 0x0281 ++#define TG3PCI_SUBDEVICE_ID_ACER_57780_A 0x0601 ++#define TG3PCI_SUBDEVICE_ID_ACER_57780_B 0x0612 ++#define TG3PCI_SUBDEVICE_ID_LENOVO_5787M 0x3056 ++ ++/* 0x30 --> 0x64 unused */ ++#define 
TG3PCI_MSI_DATA 0x00000064 ++/* 0x66 --> 0x68 unused */ ++#define TG3PCI_MISC_HOST_CTRL 0x00000068 ++#define MISC_HOST_CTRL_CLEAR_INT 0x00000001 ++#define MISC_HOST_CTRL_MASK_PCI_INT 0x00000002 ++#define MISC_HOST_CTRL_BYTE_SWAP 0x00000004 ++#define MISC_HOST_CTRL_WORD_SWAP 0x00000008 ++#define MISC_HOST_CTRL_PCISTATE_RW 0x00000010 ++#define MISC_HOST_CTRL_CLKREG_RW 0x00000020 ++#define MISC_HOST_CTRL_REGWORD_SWAP 0x00000040 ++#define MISC_HOST_CTRL_INDIR_ACCESS 0x00000080 ++#define MISC_HOST_CTRL_IRQ_MASK_MODE 0x00000100 ++#define MISC_HOST_CTRL_TAGGED_STATUS 0x00000200 ++#define MISC_HOST_CTRL_CHIPREV 0xffff0000 ++#define MISC_HOST_CTRL_CHIPREV_SHIFT 16 ++ ++#define CHIPREV_ID_5700_A0 0x7000 ++#define CHIPREV_ID_5700_A1 0x7001 ++#define CHIPREV_ID_5700_B0 0x7100 ++#define CHIPREV_ID_5700_B1 0x7101 ++#define CHIPREV_ID_5700_B3 0x7102 ++#define CHIPREV_ID_5700_ALTIMA 0x7104 ++#define CHIPREV_ID_5700_C0 0x7200 ++#define CHIPREV_ID_5701_A0 0x0000 ++#define CHIPREV_ID_5701_B0 0x0100 ++#define CHIPREV_ID_5701_B2 0x0102 ++#define CHIPREV_ID_5701_B5 0x0105 ++#define CHIPREV_ID_5703_A0 0x1000 ++#define CHIPREV_ID_5703_A1 0x1001 ++#define CHIPREV_ID_5703_A2 0x1002 ++#define CHIPREV_ID_5703_A3 0x1003 ++#define CHIPREV_ID_5704_A0 0x2000 ++#define CHIPREV_ID_5704_A1 0x2001 ++#define CHIPREV_ID_5704_A2 0x2002 ++#define CHIPREV_ID_5704_A3 0x2003 ++#define CHIPREV_ID_5705_A0 0x3000 ++#define CHIPREV_ID_5705_A1 0x3001 ++#define CHIPREV_ID_5705_A2 0x3002 ++#define CHIPREV_ID_5705_A3 0x3003 ++#define CHIPREV_ID_5750_A0 0x4000 ++#define CHIPREV_ID_5750_A1 0x4001 ++#define CHIPREV_ID_5750_A3 0x4003 ++#define CHIPREV_ID_5750_C2 0x4202 ++#define CHIPREV_ID_5752_A0_HW 0x5000 ++#define CHIPREV_ID_5752_A0 0x6000 ++#define CHIPREV_ID_5752_A1 0x6001 ++#define CHIPREV_ID_5714_A2 0x9002 ++#define CHIPREV_ID_5906_A1 0xc001 ++#define CHIPREV_ID_57780_A0 0x57780000 ++#define CHIPREV_ID_57780_A1 0x57780001 ++#define CHIPREV_ID_5717_A0 0x05717000 ++#define CHIPREV_ID_5717_C0 0x05717200 ++#define CHIPREV_ID_57765_A0 0x57785000 ++#define CHIPREV_ID_5719_A0 0x05719000 ++#define CHIPREV_ID_5720_A0 0x05720000 ++#define CHIPREV_ID_5762_A0 0x05762000 ++ ++#define ASIC_REV_5700 0x07 ++#define ASIC_REV_5701 0x00 ++#define ASIC_REV_5703 0x01 ++#define ASIC_REV_5704 0x02 ++#define ASIC_REV_5705 0x03 ++#define ASIC_REV_5750 0x04 ++#define ASIC_REV_5752 0x06 ++#define ASIC_REV_5780 0x08 ++#define ASIC_REV_5714 0x09 ++#define ASIC_REV_5755 0x0a ++#define ASIC_REV_5787 0x0b ++#define ASIC_REV_5906 0x0c ++#define ASIC_REV_USE_PROD_ID_REG 0x0f ++#define ASIC_REV_5784 0x5784 ++#define ASIC_REV_5761 0x5761 ++#define ASIC_REV_5785 0x5785 ++#define ASIC_REV_57780 0x57780 ++#define ASIC_REV_5717 0x5717 ++#define ASIC_REV_57765 0x57785 ++#define ASIC_REV_5719 0x5719 ++#define ASIC_REV_5720 0x5720 ++#define ASIC_REV_57766 0x57766 ++#define ASIC_REV_5762 0x5762 ++#define CHIPREV_5700_AX 0x70 ++#define CHIPREV_5700_BX 0x71 ++#define CHIPREV_5700_CX 0x72 ++#define CHIPREV_5701_AX 0x00 ++#define CHIPREV_5703_AX 0x10 ++#define CHIPREV_5704_AX 0x20 ++#define CHIPREV_5704_BX 0x21 ++#define CHIPREV_5750_AX 0x40 ++#define CHIPREV_5750_BX 0x41 ++#define CHIPREV_5784_AX 0x57840 ++#define CHIPREV_5761_AX 0x57610 ++#define CHIPREV_57765_AX 0x577650 ++#define METAL_REV_A0 0x00 ++#define METAL_REV_A1 0x01 ++#define METAL_REV_B0 0x00 ++#define METAL_REV_B1 0x01 ++#define METAL_REV_B2 0x02 ++#define TG3PCI_DMA_RW_CTRL 0x0000006c ++#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001 ++#define DMA_RWCTRL_TAGGED_STAT_WA 0x00000080 ++#define 
DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380 ++#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 ++#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 ++#define DMA_RWCTRL_READ_BNDRY_16 0x00000100 ++#define DMA_RWCTRL_READ_BNDRY_128_PCIX 0x00000100 ++#define DMA_RWCTRL_READ_BNDRY_32 0x00000200 ++#define DMA_RWCTRL_READ_BNDRY_256_PCIX 0x00000200 ++#define DMA_RWCTRL_READ_BNDRY_64 0x00000300 ++#define DMA_RWCTRL_READ_BNDRY_384_PCIX 0x00000300 ++#define DMA_RWCTRL_READ_BNDRY_128 0x00000400 ++#define DMA_RWCTRL_READ_BNDRY_256 0x00000500 ++#define DMA_RWCTRL_READ_BNDRY_512 0x00000600 ++#define DMA_RWCTRL_READ_BNDRY_1024 0x00000700 ++#define DMA_RWCTRL_WRITE_BNDRY_MASK 0x00003800 ++#define DMA_RWCTRL_WRITE_BNDRY_DISAB 0x00000000 ++#define DMA_RWCTRL_WRITE_BNDRY_16 0x00000800 ++#define DMA_RWCTRL_WRITE_BNDRY_128_PCIX 0x00000800 ++#define DMA_RWCTRL_WRITE_BNDRY_32 0x00001000 ++#define DMA_RWCTRL_WRITE_BNDRY_256_PCIX 0x00001000 ++#define DMA_RWCTRL_WRITE_BNDRY_64 0x00001800 ++#define DMA_RWCTRL_WRITE_BNDRY_384_PCIX 0x00001800 ++#define DMA_RWCTRL_WRITE_BNDRY_128 0x00002000 ++#define DMA_RWCTRL_WRITE_BNDRY_256 0x00002800 ++#define DMA_RWCTRL_WRITE_BNDRY_512 0x00003000 ++#define DMA_RWCTRL_WRITE_BNDRY_1024 0x00003800 ++#define DMA_RWCTRL_ONE_DMA 0x00004000 ++#define DMA_RWCTRL_READ_WATER 0x00070000 ++#define DMA_RWCTRL_READ_WATER_SHIFT 16 ++#define DMA_RWCTRL_WRITE_WATER 0x00380000 ++#define DMA_RWCTRL_WRITE_WATER_SHIFT 19 ++#define DMA_RWCTRL_USE_MEM_READ_MULT 0x00400000 ++#define DMA_RWCTRL_ASSERT_ALL_BE 0x00800000 ++#define DMA_RWCTRL_PCI_READ_CMD 0x0f000000 ++#define DMA_RWCTRL_PCI_READ_CMD_SHIFT 24 ++#define DMA_RWCTRL_PCI_WRITE_CMD 0xf0000000 ++#define DMA_RWCTRL_PCI_WRITE_CMD_SHIFT 28 ++#define DMA_RWCTRL_WRITE_BNDRY_64_PCIE 0x10000000 ++#define DMA_RWCTRL_WRITE_BNDRY_128_PCIE 0x30000000 ++#define DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE 0x70000000 ++#define TG3PCI_PCISTATE 0x00000070 ++#define PCISTATE_FORCE_RESET 0x00000001 ++#define PCISTATE_INT_NOT_ACTIVE 0x00000002 ++#define PCISTATE_CONV_PCI_MODE 0x00000004 ++#define PCISTATE_BUS_SPEED_HIGH 0x00000008 ++#define PCISTATE_BUS_32BIT 0x00000010 ++#define PCISTATE_ROM_ENABLE 0x00000020 ++#define PCISTATE_ROM_RETRY_ENABLE 0x00000040 ++#define PCISTATE_FLAT_VIEW 0x00000100 ++#define PCISTATE_RETRY_SAME_DMA 0x00002000 ++#define PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000 ++#define PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000 ++#define PCISTATE_ALLOW_APE_PSPACE_WR 0x00040000 ++#define TG3PCI_CLOCK_CTRL 0x00000074 ++#define CLOCK_CTRL_CORECLK_DISABLE 0x00000200 ++#define CLOCK_CTRL_RXCLK_DISABLE 0x00000400 ++#define CLOCK_CTRL_TXCLK_DISABLE 0x00000800 ++#define CLOCK_CTRL_ALTCLK 0x00001000 ++#define CLOCK_CTRL_PWRDOWN_PLL133 0x00008000 ++#define CLOCK_CTRL_44MHZ_CORE 0x00040000 ++#define CLOCK_CTRL_625_CORE 0x00100000 ++#define CLOCK_CTRL_FORCE_CLKRUN 0x00200000 ++#define CLOCK_CTRL_CLKRUN_OENABLE 0x00400000 ++#define CLOCK_CTRL_DELAY_PCI_GRANT 0x80000000 ++#define TG3PCI_REG_BASE_ADDR 0x00000078 ++#define TG3PCI_MEM_WIN_BASE_ADDR 0x0000007c ++#define TG3PCI_REG_DATA 0x00000080 ++#define TG3PCI_MEM_WIN_DATA 0x00000084 ++#define TG3PCI_MISC_LOCAL_CTRL 0x00000090 ++/* 0x94 --> 0x98 unused */ ++#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ ++#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ ++/* 0xa8 --> 0xb8 unused */ ++#define TG3PCI_DUAL_MAC_CTRL 0x000000b8 ++#define DUAL_MAC_CTRL_CH_MASK 0x00000003 ++#define DUAL_MAC_CTRL_ID 0x00000004 ++#define TG3PCI_PRODID_ASICREV 0x000000bc ++#define PROD_ID_ASIC_REV_MASK 0x0fffffff ++/* 0xc0 --> 0xf4 unused */ ++ 
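++/* When TG3PCI_PRODID_ASICREV reports ASIC_REV_USE_PROD_ID_REG, the real
++ * chip ID is read from one of the following registers instead.
++ */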
++#define TG3PCI_GEN2_PRODID_ASICREV 0x000000f4 ++#define TG3PCI_GEN15_PRODID_ASICREV 0x000000fc ++/* 0xf8 --> 0x200 unused */ ++ ++#define TG3_CORR_ERR_STAT 0x00000110 ++#define TG3_CORR_ERR_STAT_CLEAR 0xffffffff ++/* 0x114 --> 0x200 unused */ ++ ++/* Mailbox registers */ ++#define MAILBOX_INTERRUPT_0 0x00000200 /* 64-bit */ ++#define MAILBOX_INTERRUPT_1 0x00000208 /* 64-bit */ ++#define MAILBOX_INTERRUPT_2 0x00000210 /* 64-bit */ ++#define MAILBOX_INTERRUPT_3 0x00000218 /* 64-bit */ ++#define MAILBOX_GENERAL_0 0x00000220 /* 64-bit */ ++#define MAILBOX_GENERAL_1 0x00000228 /* 64-bit */ ++#define MAILBOX_GENERAL_2 0x00000230 /* 64-bit */ ++#define MAILBOX_GENERAL_3 0x00000238 /* 64-bit */ ++#define MAILBOX_GENERAL_4 0x00000240 /* 64-bit */ ++#define MAILBOX_GENERAL_5 0x00000248 /* 64-bit */ ++#define MAILBOX_GENERAL_6 0x00000250 /* 64-bit */ ++#define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */ ++#define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */ ++#define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */ ++#define TG3_RX_STD_PROD_IDX_REG (MAILBOX_RCV_STD_PROD_IDX + \ ++ TG3_64BIT_REG_LOW) ++#define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */ ++#define TG3_RX_JMB_PROD_IDX_REG (MAILBOX_RCV_JUMBO_PROD_IDX + \ ++ TG3_64BIT_REG_LOW) ++#define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_2 0x00000290 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_3 0x00000298 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_4 0x000002a0 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_5 0x000002a8 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_6 0x000002b0 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_7 0x000002b8 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_8 0x000002c0 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_9 0x000002c8 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_10 0x000002d0 /* 64-bit */ ++#define MAILBOX_RCV_JUMBO_PROD_IDX_RING1 0x000002d4 /* 32-bit */ ++#define MAILBOX_RCVRET_CON_IDX_11 0x000002d8 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_12 0x000002e0 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_13 0x000002e8 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_14 0x000002f0 /* 64-bit */ ++#define MAILBOX_RCVRET_CON_IDX_15 0x000002f8 /* 64-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_0 0x00000300 /* 64-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_1 0x00000308 /* 64-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_2 0x00000310 /* 64-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_3 0x00000318 /* 64-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_4 0x00000320 /* 64-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_5 0x00000328 /* 64-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_6 0x00000330 /* 64-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_7 0x00000338 /* 64-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_8 0x00000340 /* 64-bit */ ++#define MAILBOX_RCV_JMB_PROD_IDX_RING12 0x00000340 /* 32-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_9 0x00000348 /* 64-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_10 0x00000350 /* 64-bit */ ++#define MAILBOX_RCV_STD_PROD_IDX_RING1 0x00000354 /* 32-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_11 0x00000358 /* 64-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_12 0x00000360 /* 64-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_13 0x00000368 /* 64-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_14 0x00000370 /* 64-bit */ ++#define MAILBOX_SNDHOST_PROD_IDX_15 0x00000378 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_0 0x00000380 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_1 0x00000388 /* 
64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_2 0x00000390 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_3 0x00000398 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_4 0x000003a0 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_5 0x000003a8 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_6 0x000003b0 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_7 0x000003b8 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_8 0x000003c0 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_9 0x000003c8 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_10 0x000003d0 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_11 0x000003d8 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_12 0x000003e0 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_13 0x000003e8 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_14 0x000003f0 /* 64-bit */ ++#define MAILBOX_SNDNIC_PROD_IDX_15 0x000003f8 /* 64-bit */ ++ ++/* MAC control registers */ ++#define MAC_MODE 0x00000400 ++#define MAC_MODE_RESET 0x00000001 ++#define MAC_MODE_HALF_DUPLEX 0x00000002 ++#define MAC_MODE_PORT_MODE_MASK 0x0000000c ++#define MAC_MODE_PORT_MODE_TBI 0x0000000c ++#define MAC_MODE_PORT_MODE_GMII 0x00000008 ++#define MAC_MODE_PORT_MODE_MII 0x00000004 ++#define MAC_MODE_PORT_MODE_NONE 0x00000000 ++#define MAC_MODE_PORT_INT_LPBACK 0x00000010 ++#define MAC_MODE_TAGGED_MAC_CTRL 0x00000080 ++#define MAC_MODE_TX_BURSTING 0x00000100 ++#define MAC_MODE_MAX_DEFER 0x00000200 ++#define MAC_MODE_LINK_POLARITY 0x00000400 ++#define MAC_MODE_RXSTAT_ENABLE 0x00000800 ++#define MAC_MODE_RXSTAT_CLEAR 0x00001000 ++#define MAC_MODE_RXSTAT_FLUSH 0x00002000 ++#define MAC_MODE_TXSTAT_ENABLE 0x00004000 ++#define MAC_MODE_TXSTAT_CLEAR 0x00008000 ++#define MAC_MODE_TXSTAT_FLUSH 0x00010000 ++#define MAC_MODE_SEND_CONFIGS 0x00020000 ++#define MAC_MODE_MAGIC_PKT_ENABLE 0x00040000 ++#define MAC_MODE_ACPI_ENABLE 0x00080000 ++#define MAC_MODE_MIP_ENABLE 0x00100000 ++#define MAC_MODE_TDE_ENABLE 0x00200000 ++#define MAC_MODE_RDE_ENABLE 0x00400000 ++#define MAC_MODE_FHDE_ENABLE 0x00800000 ++#define MAC_MODE_KEEP_FRAME_IN_WOL 0x01000000 ++#define MAC_MODE_APE_RX_EN 0x08000000 ++#define MAC_MODE_APE_TX_EN 0x10000000 ++#define MAC_STATUS 0x00000404 ++#define MAC_STATUS_PCS_SYNCED 0x00000001 ++#define MAC_STATUS_SIGNAL_DET 0x00000002 ++#define MAC_STATUS_RCVD_CFG 0x00000004 ++#define MAC_STATUS_CFG_CHANGED 0x00000008 ++#define MAC_STATUS_SYNC_CHANGED 0x00000010 ++#define MAC_STATUS_PORT_DEC_ERR 0x00000400 ++#define MAC_STATUS_LNKSTATE_CHANGED 0x00001000 ++#define MAC_STATUS_MI_COMPLETION 0x00400000 ++#define MAC_STATUS_MI_INTERRUPT 0x00800000 ++#define MAC_STATUS_AP_ERROR 0x01000000 ++#define MAC_STATUS_ODI_ERROR 0x02000000 ++#define MAC_STATUS_RXSTAT_OVERRUN 0x04000000 ++#define MAC_STATUS_TXSTAT_OVERRUN 0x08000000 ++#define MAC_EVENT 0x00000408 ++#define MAC_EVENT_PORT_DECODE_ERR 0x00000400 ++#define MAC_EVENT_LNKSTATE_CHANGED 0x00001000 ++#define MAC_EVENT_MI_COMPLETION 0x00400000 ++#define MAC_EVENT_MI_INTERRUPT 0x00800000 ++#define MAC_EVENT_AP_ERROR 0x01000000 ++#define MAC_EVENT_ODI_ERROR 0x02000000 ++#define MAC_EVENT_RXSTAT_OVERRUN 0x04000000 ++#define MAC_EVENT_TXSTAT_OVERRUN 0x08000000 ++#define MAC_LED_CTRL 0x0000040c ++#define LED_CTRL_LNKLED_OVERRIDE 0x00000001 ++#define LED_CTRL_1000MBPS_ON 0x00000002 ++#define LED_CTRL_100MBPS_ON 0x00000004 ++#define LED_CTRL_10MBPS_ON 0x00000008 ++#define LED_CTRL_TRAFFIC_OVERRIDE 0x00000010 ++#define LED_CTRL_TRAFFIC_BLINK 0x00000020 ++#define LED_CTRL_TRAFFIC_LED 0x00000040 ++#define LED_CTRL_1000MBPS_STATUS 0x00000080 ++#define LED_CTRL_100MBPS_STATUS 0x00000100 
++#define LED_CTRL_10MBPS_STATUS 0x00000200 ++#define LED_CTRL_TRAFFIC_STATUS 0x00000400 ++#define LED_CTRL_MODE_MAC 0x00000000 ++#define LED_CTRL_MODE_PHY_1 0x00000800 ++#define LED_CTRL_MODE_PHY_2 0x00001000 ++#define LED_CTRL_MODE_SHASTA_MAC 0x00002000 ++#define LED_CTRL_MODE_SHARED 0x00004000 ++#define LED_CTRL_MODE_COMBO 0x00008000 ++#define LED_CTRL_BLINK_RATE_MASK 0x7ff80000 ++#define LED_CTRL_BLINK_RATE_SHIFT 19 ++#define LED_CTRL_BLINK_PER_OVERRIDE 0x00080000 ++#define LED_CTRL_BLINK_RATE_OVERRIDE 0x80000000 ++#define MAC_ADDR_0_HIGH 0x00000410 /* upper 2 bytes */ ++#define MAC_ADDR_0_LOW 0x00000414 /* lower 4 bytes */ ++#define MAC_ADDR_1_HIGH 0x00000418 /* upper 2 bytes */ ++#define MAC_ADDR_1_LOW 0x0000041c /* lower 4 bytes */ ++#define MAC_ADDR_2_HIGH 0x00000420 /* upper 2 bytes */ ++#define MAC_ADDR_2_LOW 0x00000424 /* lower 4 bytes */ ++#define MAC_ADDR_3_HIGH 0x00000428 /* upper 2 bytes */ ++#define MAC_ADDR_3_LOW 0x0000042c /* lower 4 bytes */ ++#define MAC_ACPI_MBUF_PTR 0x00000430 ++#define MAC_ACPI_LEN_OFFSET 0x00000434 ++#define ACPI_LENOFF_LEN_MASK 0x0000ffff ++#define ACPI_LENOFF_LEN_SHIFT 0 ++#define ACPI_LENOFF_OFF_MASK 0x0fff0000 ++#define ACPI_LENOFF_OFF_SHIFT 16 ++#define MAC_TX_BACKOFF_SEED 0x00000438 ++#define TX_BACKOFF_SEED_MASK 0x000003ff ++#define MAC_RX_MTU_SIZE 0x0000043c ++#define RX_MTU_SIZE_MASK 0x0000ffff ++#define MAC_PCS_TEST 0x00000440 ++#define PCS_TEST_PATTERN_MASK 0x000fffff ++#define PCS_TEST_PATTERN_SHIFT 0 ++#define PCS_TEST_ENABLE 0x00100000 ++#define MAC_TX_AUTO_NEG 0x00000444 ++#define TX_AUTO_NEG_MASK 0x0000ffff ++#define TX_AUTO_NEG_SHIFT 0 ++#define MAC_RX_AUTO_NEG 0x00000448 ++#define RX_AUTO_NEG_MASK 0x0000ffff ++#define RX_AUTO_NEG_SHIFT 0 ++#define MAC_MI_COM 0x0000044c ++#define MI_COM_CMD_MASK 0x0c000000 ++#define MI_COM_CMD_WRITE 0x04000000 ++#define MI_COM_CMD_READ 0x08000000 ++#define MI_COM_READ_FAILED 0x10000000 ++#define MI_COM_START 0x20000000 ++#define MI_COM_BUSY 0x20000000 ++#define MI_COM_PHY_ADDR_MASK 0x03e00000 ++#define MI_COM_PHY_ADDR_SHIFT 21 ++#define MI_COM_REG_ADDR_MASK 0x001f0000 ++#define MI_COM_REG_ADDR_SHIFT 16 ++#define MI_COM_DATA_MASK 0x0000ffff ++#define MAC_MI_STAT 0x00000450 ++#define MAC_MI_STAT_LNKSTAT_ATTN_ENAB 0x00000001 ++#define MAC_MI_STAT_10MBPS_MODE 0x00000002 ++#define MAC_MI_MODE 0x00000454 ++#define MAC_MI_MODE_CLK_10MHZ 0x00000001 ++#define MAC_MI_MODE_SHORT_PREAMBLE 0x00000002 ++#define MAC_MI_MODE_AUTO_POLL 0x00000010 ++#define MAC_MI_MODE_500KHZ_CONST 0x00008000 ++#define MAC_MI_MODE_BASE 0x000c0000 /* XXX magic values XXX */ ++#define MAC_AUTO_POLL_STATUS 0x00000458 ++#define MAC_AUTO_POLL_ERROR 0x00000001 ++#define MAC_TX_MODE 0x0000045c ++#define TX_MODE_RESET 0x00000001 ++#define TX_MODE_ENABLE 0x00000002 ++#define TX_MODE_FLOW_CTRL_ENABLE 0x00000010 ++#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020 ++#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040 ++#define TX_MODE_MBUF_LOCKUP_FIX 0x00000100 ++#define TX_MODE_JMB_FRM_LEN 0x00400000 ++#define TX_MODE_CNT_DN_MODE 0x00800000 ++#define MAC_TX_STATUS 0x00000460 ++#define TX_STATUS_XOFFED 0x00000001 ++#define TX_STATUS_SENT_XOFF 0x00000002 ++#define TX_STATUS_SENT_XON 0x00000004 ++#define TX_STATUS_LINK_UP 0x00000008 ++#define TX_STATUS_ODI_UNDERRUN 0x00000010 ++#define TX_STATUS_ODI_OVERRUN 0x00000020 ++#define MAC_TX_LENGTHS 0x00000464 ++#define TX_LENGTHS_SLOT_TIME_MASK 0x000000ff ++#define TX_LENGTHS_SLOT_TIME_SHIFT 0 ++#define TX_LENGTHS_IPG_MASK 0x00000f00 ++#define TX_LENGTHS_IPG_SHIFT 8 ++#define TX_LENGTHS_IPG_CRS_MASK 0x00003000 
++#define TX_LENGTHS_IPG_CRS_SHIFT 12 ++#define TX_LENGTHS_JMB_FRM_LEN_MSK 0x00ff0000 ++#define TX_LENGTHS_CNT_DWN_VAL_MSK 0xff000000 ++#define MAC_RX_MODE 0x00000468 ++#define RX_MODE_RESET 0x00000001 ++#define RX_MODE_ENABLE 0x00000002 ++#define RX_MODE_FLOW_CTRL_ENABLE 0x00000004 ++#define RX_MODE_KEEP_MAC_CTRL 0x00000008 ++#define RX_MODE_KEEP_PAUSE 0x00000010 ++#define RX_MODE_ACCEPT_OVERSIZED 0x00000020 ++#define RX_MODE_ACCEPT_RUNTS 0x00000040 ++#define RX_MODE_LEN_CHECK 0x00000080 ++#define RX_MODE_PROMISC 0x00000100 ++#define RX_MODE_NO_CRC_CHECK 0x00000200 ++#define RX_MODE_KEEP_VLAN_TAG 0x00000400 ++#define RX_MODE_RSS_IPV4_HASH_EN 0x00010000 ++#define RX_MODE_RSS_TCP_IPV4_HASH_EN 0x00020000 ++#define RX_MODE_RSS_IPV6_HASH_EN 0x00040000 ++#define RX_MODE_RSS_TCP_IPV6_HASH_EN 0x00080000 ++#define RX_MODE_RSS_ITBL_HASH_BITS_7 0x00700000 ++#define RX_MODE_RSS_ENABLE 0x00800000 ++#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000 ++#define RX_MODE_IPV4_FRAG_FIX 0x02000000 ++#define MAC_RX_STATUS 0x0000046c ++#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001 ++#define RX_STATUS_XOFF_RCVD 0x00000002 ++#define RX_STATUS_XON_RCVD 0x00000004 ++#define MAC_HASH_REG_0 0x00000470 ++#define MAC_HASH_REG_1 0x00000474 ++#define MAC_HASH_REG_2 0x00000478 ++#define MAC_HASH_REG_3 0x0000047c ++#define MAC_RCV_RULE_0 0x00000480 ++#define MAC_RCV_VALUE_0 0x00000484 ++#define MAC_RCV_RULE_1 0x00000488 ++#define MAC_RCV_VALUE_1 0x0000048c ++#define MAC_RCV_RULE_2 0x00000490 ++#define MAC_RCV_VALUE_2 0x00000494 ++#define MAC_RCV_RULE_3 0x00000498 ++#define MAC_RCV_VALUE_3 0x0000049c ++#define MAC_RCV_RULE_4 0x000004a0 ++#define MAC_RCV_VALUE_4 0x000004a4 ++#define MAC_RCV_RULE_5 0x000004a8 ++#define MAC_RCV_VALUE_5 0x000004ac ++#define MAC_RCV_RULE_6 0x000004b0 ++#define MAC_RCV_VALUE_6 0x000004b4 ++#define MAC_RCV_RULE_7 0x000004b8 ++#define MAC_RCV_VALUE_7 0x000004bc ++#define MAC_RCV_RULE_8 0x000004c0 ++#define MAC_RCV_VALUE_8 0x000004c4 ++#define MAC_RCV_RULE_9 0x000004c8 ++#define MAC_RCV_VALUE_9 0x000004cc ++#define MAC_RCV_RULE_10 0x000004d0 ++#define MAC_RCV_VALUE_10 0x000004d4 ++#define MAC_RCV_RULE_11 0x000004d8 ++#define MAC_RCV_VALUE_11 0x000004dc ++#define MAC_RCV_RULE_12 0x000004e0 ++#define MAC_RCV_VALUE_12 0x000004e4 ++#define MAC_RCV_RULE_13 0x000004e8 ++#define MAC_RCV_VALUE_13 0x000004ec ++#define MAC_RCV_RULE_14 0x000004f0 ++#define MAC_RCV_VALUE_14 0x000004f4 ++#define MAC_RCV_RULE_15 0x000004f8 ++#define MAC_RCV_VALUE_15 0x000004fc ++#define RCV_RULE_DISABLE_MASK 0x7fffffff ++#define MAC_RCV_RULE_CFG 0x00000500 ++#define RCV_RULE_CFG_DEFAULT_CLASS 0x00000008 ++#define MAC_LOW_WMARK_MAX_RX_FRAME 0x00000504 ++/* 0x508 --> 0x520 unused */ ++#define MAC_HASHREGU_0 0x00000520 ++#define MAC_HASHREGU_1 0x00000524 ++#define MAC_HASHREGU_2 0x00000528 ++#define MAC_HASHREGU_3 0x0000052c ++#define MAC_EXTADDR_0_HIGH 0x00000530 ++#define MAC_EXTADDR_0_LOW 0x00000534 ++#define MAC_EXTADDR_1_HIGH 0x00000538 ++#define MAC_EXTADDR_1_LOW 0x0000053c ++#define MAC_EXTADDR_2_HIGH 0x00000540 ++#define MAC_EXTADDR_2_LOW 0x00000544 ++#define MAC_EXTADDR_3_HIGH 0x00000548 ++#define MAC_EXTADDR_3_LOW 0x0000054c ++#define MAC_EXTADDR_4_HIGH 0x00000550 ++#define MAC_EXTADDR_4_LOW 0x00000554 ++#define MAC_EXTADDR_5_HIGH 0x00000558 ++#define MAC_EXTADDR_5_LOW 0x0000055c ++#define MAC_EXTADDR_6_HIGH 0x00000560 ++#define MAC_VRQ_ENABLE 0x00000560 ++#define MAC_VRQ_ENABLE_DFLT_VRQ 0x00000001 ++#define MAC_EXTADDR_6_LOW 0x00000564 ++#define MAC_EXTADDR_7_HIGH 0x00000568 ++#define MAC_EXTADDR_7_LOW 0x0000056c ++#define 
MAC_EXTADDR_8_HIGH 0x00000570 ++#define MAC_EXTADDR_8_LOW 0x00000574 ++#define MAC_EXTADDR_9_HIGH 0x00000578 ++#define MAC_EXTADDR_9_LOW 0x0000057c ++#define MAC_EXTADDR_10_HIGH 0x00000580 ++#define MAC_EXTADDR_10_LOW 0x00000584 ++#define MAC_EXTADDR_11_HIGH 0x00000588 ++#define MAC_EXTADDR_11_LOW 0x0000058c ++#define MAC_SERDES_CFG 0x00000590 ++#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000 ++#define MAC_SERDES_STAT 0x00000594 ++/* 0x598 --> 0x5a0 unused */ ++#define MAC_PHYCFG1 0x000005a0 ++#define MAC_PHYCFG1_RGMII_INT 0x00000001 ++#define MAC_PHYCFG1_RXCLK_TO_MASK 0x00001ff0 ++#define MAC_PHYCFG1_RXCLK_TIMEOUT 0x00001000 ++#define MAC_PHYCFG1_TXCLK_TO_MASK 0x01ff0000 ++#define MAC_PHYCFG1_TXCLK_TIMEOUT 0x01000000 ++#define MAC_PHYCFG1_RGMII_EXT_RX_DEC 0x02000000 ++#define MAC_PHYCFG1_RGMII_SND_STAT_EN 0x04000000 ++#define MAC_PHYCFG1_TXC_DRV 0x20000000 ++#define MAC_PHYCFG2 0x000005a4 ++#define MAC_PHYCFG2_INBAND_ENABLE 0x00000001 ++#define MAC_PHYCFG2_EMODE_MASK_MASK 0x000001c0 ++#define MAC_PHYCFG2_EMODE_MASK_AC131 0x000000c0 ++#define MAC_PHYCFG2_EMODE_MASK_50610 0x00000100 ++#define MAC_PHYCFG2_EMODE_MASK_RT8211 0x00000000 ++#define MAC_PHYCFG2_EMODE_MASK_RT8201 0x000001c0 ++#define MAC_PHYCFG2_EMODE_COMP_MASK 0x00000e00 ++#define MAC_PHYCFG2_EMODE_COMP_AC131 0x00000600 ++#define MAC_PHYCFG2_EMODE_COMP_50610 0x00000400 ++#define MAC_PHYCFG2_EMODE_COMP_RT8211 0x00000800 ++#define MAC_PHYCFG2_EMODE_COMP_RT8201 0x00000000 ++#define MAC_PHYCFG2_FMODE_MASK_MASK 0x00007000 ++#define MAC_PHYCFG2_FMODE_MASK_AC131 0x00006000 ++#define MAC_PHYCFG2_FMODE_MASK_50610 0x00004000 ++#define MAC_PHYCFG2_FMODE_MASK_RT8211 0x00000000 ++#define MAC_PHYCFG2_FMODE_MASK_RT8201 0x00007000 ++#define MAC_PHYCFG2_FMODE_COMP_MASK 0x00038000 ++#define MAC_PHYCFG2_FMODE_COMP_AC131 0x00030000 ++#define MAC_PHYCFG2_FMODE_COMP_50610 0x00008000 ++#define MAC_PHYCFG2_FMODE_COMP_RT8211 0x00038000 ++#define MAC_PHYCFG2_FMODE_COMP_RT8201 0x00000000 ++#define MAC_PHYCFG2_GMODE_MASK_MASK 0x001c0000 ++#define MAC_PHYCFG2_GMODE_MASK_AC131 0x001c0000 ++#define MAC_PHYCFG2_GMODE_MASK_50610 0x00100000 ++#define MAC_PHYCFG2_GMODE_MASK_RT8211 0x00000000 ++#define MAC_PHYCFG2_GMODE_MASK_RT8201 0x001c0000 ++#define MAC_PHYCFG2_GMODE_COMP_MASK 0x00e00000 ++#define MAC_PHYCFG2_GMODE_COMP_AC131 0x00e00000 ++#define MAC_PHYCFG2_GMODE_COMP_50610 0x00000000 ++#define MAC_PHYCFG2_GMODE_COMP_RT8211 0x00200000 ++#define MAC_PHYCFG2_GMODE_COMP_RT8201 0x00000000 ++#define MAC_PHYCFG2_ACT_MASK_MASK 0x03000000 ++#define MAC_PHYCFG2_ACT_MASK_AC131 0x03000000 ++#define MAC_PHYCFG2_ACT_MASK_50610 0x01000000 ++#define MAC_PHYCFG2_ACT_MASK_RT8211 0x03000000 ++#define MAC_PHYCFG2_ACT_MASK_RT8201 0x01000000 ++#define MAC_PHYCFG2_ACT_COMP_MASK 0x0c000000 ++#define MAC_PHYCFG2_ACT_COMP_AC131 0x00000000 ++#define MAC_PHYCFG2_ACT_COMP_50610 0x00000000 ++#define MAC_PHYCFG2_ACT_COMP_RT8211 0x00000000 ++#define MAC_PHYCFG2_ACT_COMP_RT8201 0x08000000 ++#define MAC_PHYCFG2_QUAL_MASK_MASK 0x30000000 ++#define MAC_PHYCFG2_QUAL_MASK_AC131 0x30000000 ++#define MAC_PHYCFG2_QUAL_MASK_50610 0x30000000 ++#define MAC_PHYCFG2_QUAL_MASK_RT8211 0x30000000 ++#define MAC_PHYCFG2_QUAL_MASK_RT8201 0x30000000 ++#define MAC_PHYCFG2_QUAL_COMP_MASK 0xc0000000 ++#define MAC_PHYCFG2_QUAL_COMP_AC131 0x00000000 ++#define MAC_PHYCFG2_QUAL_COMP_50610 0x00000000 ++#define MAC_PHYCFG2_QUAL_COMP_RT8211 0x00000000 ++#define MAC_PHYCFG2_QUAL_COMP_RT8201 0x00000000 ++#define MAC_PHYCFG2_50610_LED_MODES \ ++ (MAC_PHYCFG2_EMODE_MASK_50610 | \ ++ MAC_PHYCFG2_EMODE_COMP_50610 | \ ++ 
MAC_PHYCFG2_FMODE_MASK_50610 | \ ++ MAC_PHYCFG2_FMODE_COMP_50610 | \ ++ MAC_PHYCFG2_GMODE_MASK_50610 | \ ++ MAC_PHYCFG2_GMODE_COMP_50610 | \ ++ MAC_PHYCFG2_ACT_MASK_50610 | \ ++ MAC_PHYCFG2_ACT_COMP_50610 | \ ++ MAC_PHYCFG2_QUAL_MASK_50610 | \ ++ MAC_PHYCFG2_QUAL_COMP_50610) ++#define MAC_PHYCFG2_AC131_LED_MODES \ ++ (MAC_PHYCFG2_EMODE_MASK_AC131 | \ ++ MAC_PHYCFG2_EMODE_COMP_AC131 | \ ++ MAC_PHYCFG2_FMODE_MASK_AC131 | \ ++ MAC_PHYCFG2_FMODE_COMP_AC131 | \ ++ MAC_PHYCFG2_GMODE_MASK_AC131 | \ ++ MAC_PHYCFG2_GMODE_COMP_AC131 | \ ++ MAC_PHYCFG2_ACT_MASK_AC131 | \ ++ MAC_PHYCFG2_ACT_COMP_AC131 | \ ++ MAC_PHYCFG2_QUAL_MASK_AC131 | \ ++ MAC_PHYCFG2_QUAL_COMP_AC131) ++#define MAC_PHYCFG2_RTL8211C_LED_MODES \ ++ (MAC_PHYCFG2_EMODE_MASK_RT8211 | \ ++ MAC_PHYCFG2_EMODE_COMP_RT8211 | \ ++ MAC_PHYCFG2_FMODE_MASK_RT8211 | \ ++ MAC_PHYCFG2_FMODE_COMP_RT8211 | \ ++ MAC_PHYCFG2_GMODE_MASK_RT8211 | \ ++ MAC_PHYCFG2_GMODE_COMP_RT8211 | \ ++ MAC_PHYCFG2_ACT_MASK_RT8211 | \ ++ MAC_PHYCFG2_ACT_COMP_RT8211 | \ ++ MAC_PHYCFG2_QUAL_MASK_RT8211 | \ ++ MAC_PHYCFG2_QUAL_COMP_RT8211) ++#define MAC_PHYCFG2_RTL8201E_LED_MODES \ ++ (MAC_PHYCFG2_EMODE_MASK_RT8201 | \ ++ MAC_PHYCFG2_EMODE_COMP_RT8201 | \ ++ MAC_PHYCFG2_FMODE_MASK_RT8201 | \ ++ MAC_PHYCFG2_FMODE_COMP_RT8201 | \ ++ MAC_PHYCFG2_GMODE_MASK_RT8201 | \ ++ MAC_PHYCFG2_GMODE_COMP_RT8201 | \ ++ MAC_PHYCFG2_ACT_MASK_RT8201 | \ ++ MAC_PHYCFG2_ACT_COMP_RT8201 | \ ++ MAC_PHYCFG2_QUAL_MASK_RT8201 | \ ++ MAC_PHYCFG2_QUAL_COMP_RT8201) ++#define MAC_EXT_RGMII_MODE 0x000005a8 ++#define MAC_RGMII_MODE_TX_ENABLE 0x00000001 ++#define MAC_RGMII_MODE_TX_LOWPWR 0x00000002 ++#define MAC_RGMII_MODE_TX_RESET 0x00000004 ++#define MAC_RGMII_MODE_RX_INT_B 0x00000100 ++#define MAC_RGMII_MODE_RX_QUALITY 0x00000200 ++#define MAC_RGMII_MODE_RX_ACTIVITY 0x00000400 ++#define MAC_RGMII_MODE_RX_ENG_DET 0x00000800 ++/* 0x5ac --> 0x5b0 unused */ ++#define SERDES_RX_CTRL 0x000005b0 /* 5780/5714 only */ ++#define SERDES_RX_SIG_DETECT 0x00000400 ++#define SG_DIG_CTRL 0x000005b0 ++#define SG_DIG_USING_HW_AUTONEG 0x80000000 ++#define SG_DIG_SOFT_RESET 0x40000000 ++#define SG_DIG_DISABLE_LINKRDY 0x20000000 ++#define SG_DIG_CRC16_CLEAR_N 0x01000000 ++#define SG_DIG_EN10B 0x00800000 ++#define SG_DIG_CLEAR_STATUS 0x00400000 ++#define SG_DIG_LOCAL_DUPLEX_STATUS 0x00200000 ++#define SG_DIG_LOCAL_LINK_STATUS 0x00100000 ++#define SG_DIG_SPEED_STATUS_MASK 0x000c0000 ++#define SG_DIG_SPEED_STATUS_SHIFT 18 ++#define SG_DIG_JUMBO_PACKET_DISABLE 0x00020000 ++#define SG_DIG_RESTART_AUTONEG 0x00010000 ++#define SG_DIG_FIBER_MODE 0x00008000 ++#define SG_DIG_REMOTE_FAULT_MASK 0x00006000 ++#define SG_DIG_PAUSE_MASK 0x00001800 ++#define SG_DIG_PAUSE_CAP 0x00000800 ++#define SG_DIG_ASYM_PAUSE 0x00001000 ++#define SG_DIG_GBIC_ENABLE 0x00000400 ++#define SG_DIG_CHECK_END_ENABLE 0x00000200 ++#define SG_DIG_SGMII_AUTONEG_TIMER 0x00000100 ++#define SG_DIG_CLOCK_PHASE_SELECT 0x00000080 ++#define SG_DIG_GMII_INPUT_SELECT 0x00000040 ++#define SG_DIG_MRADV_CRC16_SELECT 0x00000020 ++#define SG_DIG_COMMA_DETECT_ENABLE 0x00000010 ++#define SG_DIG_AUTONEG_TIMER_REDUCE 0x00000008 ++#define SG_DIG_AUTONEG_LOW_ENABLE 0x00000004 ++#define SG_DIG_REMOTE_LOOPBACK 0x00000002 ++#define SG_DIG_LOOPBACK 0x00000001 ++#define SG_DIG_COMMON_SETUP (SG_DIG_CRC16_CLEAR_N | \ ++ SG_DIG_LOCAL_DUPLEX_STATUS | \ ++ SG_DIG_LOCAL_LINK_STATUS | \ ++ (0x2 << SG_DIG_SPEED_STATUS_SHIFT) | \ ++ SG_DIG_FIBER_MODE | SG_DIG_GBIC_ENABLE) ++#define SG_DIG_STATUS 0x000005b4 ++#define SG_DIG_CRC16_BUS_MASK 0xffff0000 ++#define SG_DIG_PARTNER_FAULT_MASK 0x00600000 
/* If !MRADV_CRC16_SELECT */ ++#define SG_DIG_PARTNER_ASYM_PAUSE 0x00100000 /* If !MRADV_CRC16_SELECT */ ++#define SG_DIG_PARTNER_PAUSE_CAPABLE 0x00080000 /* If !MRADV_CRC16_SELECT */ ++#define SG_DIG_PARTNER_HALF_DUPLEX 0x00040000 /* If !MRADV_CRC16_SELECT */ ++#define SG_DIG_PARTNER_FULL_DUPLEX 0x00020000 /* If !MRADV_CRC16_SELECT */ ++#define SG_DIG_PARTNER_NEXT_PAGE 0x00010000 /* If !MRADV_CRC16_SELECT */ ++#define SG_DIG_AUTONEG_STATE_MASK 0x00000ff0 ++#define SG_DIG_IS_SERDES 0x00000100 ++#define SG_DIG_COMMA_DETECTOR 0x00000008 ++#define SG_DIG_MAC_ACK_STATUS 0x00000004 ++#define SG_DIG_AUTONEG_COMPLETE 0x00000002 ++#define SG_DIG_AUTONEG_ERROR 0x00000001 ++#define TG3_TX_TSTAMP_LSB 0x000005c0 ++#define TG3_TX_TSTAMP_MSB 0x000005c4 ++#define TG3_TSTAMP_MASK 0x7fffffffffffffff ++/* 0x5c8 --> 0x600 unused */ ++#define MAC_TX_MAC_STATE_BASE 0x00000600 /* 16 bytes */ ++#define MAC_RX_MAC_STATE_BASE 0x00000610 /* 20 bytes */ ++/* 0x624 --> 0x670 unused */ ++ ++#define MAC_RSS_INDIR_TBL_0 0x00000630 ++ ++#define MAC_RSS_HASH_KEY_0 0x00000670 ++#define MAC_RSS_HASH_KEY_1 0x00000674 ++#define MAC_RSS_HASH_KEY_2 0x00000678 ++#define MAC_RSS_HASH_KEY_3 0x0000067c ++#define MAC_RSS_HASH_KEY_4 0x00000680 ++#define MAC_RSS_HASH_KEY_5 0x00000684 ++#define MAC_RSS_HASH_KEY_6 0x00000688 ++#define MAC_RSS_HASH_KEY_7 0x0000068c ++#define MAC_RSS_HASH_KEY_8 0x00000690 ++#define MAC_RSS_HASH_KEY_9 0x00000694 ++/* 0x698 --> 0x6b0 unused */ ++ ++#define TG3_RX_TSTAMP_LSB 0x000006b0 ++#define TG3_RX_TSTAMP_MSB 0x000006b4 ++/* 0x6b8 --> 0x6c8 unused */ ++ ++#define TG3_RX_PTP_CTL 0x000006c8 ++#define TG3_RX_PTP_CTL_SYNC_EVNT 0x00000001 ++#define TG3_RX_PTP_CTL_DELAY_REQ 0x00000002 ++#define TG3_RX_PTP_CTL_PDLAY_REQ 0x00000004 ++#define TG3_RX_PTP_CTL_PDLAY_RES 0x00000008 ++#define TG3_RX_PTP_CTL_ALL_V1_EVENTS (TG3_RX_PTP_CTL_SYNC_EVNT | \ ++ TG3_RX_PTP_CTL_DELAY_REQ) ++#define TG3_RX_PTP_CTL_ALL_V2_EVENTS (TG3_RX_PTP_CTL_SYNC_EVNT | \ ++ TG3_RX_PTP_CTL_DELAY_REQ | \ ++ TG3_RX_PTP_CTL_PDLAY_REQ | \ ++ TG3_RX_PTP_CTL_PDLAY_RES) ++#define TG3_RX_PTP_CTL_FOLLOW_UP 0x00000100 ++#define TG3_RX_PTP_CTL_DELAY_RES 0x00000200 ++#define TG3_RX_PTP_CTL_PDRES_FLW_UP 0x00000400 ++#define TG3_RX_PTP_CTL_ANNOUNCE 0x00000800 ++#define TG3_RX_PTP_CTL_SIGNALING 0x00001000 ++#define TG3_RX_PTP_CTL_MANAGEMENT 0x00002000 ++#define TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN 0x00800000 ++#define TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN 0x01000000 ++#define TG3_RX_PTP_CTL_RX_PTP_V2_EN (TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | \ ++ TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN) ++#define TG3_RX_PTP_CTL_RX_PTP_V1_EN 0x02000000 ++#define TG3_RX_PTP_CTL_HWTS_INTERLOCK 0x04000000 ++/* 0x6cc --> 0x800 unused */ ++ ++#define MAC_TX_STATS_OCTETS 0x00000800 ++#define MAC_TX_STATS_RESV1 0x00000804 ++#define MAC_TX_STATS_COLLISIONS 0x00000808 ++#define MAC_TX_STATS_XON_SENT 0x0000080c ++#define MAC_TX_STATS_XOFF_SENT 0x00000810 ++#define MAC_TX_STATS_RESV2 0x00000814 ++#define MAC_TX_STATS_MAC_ERRORS 0x00000818 ++#define MAC_TX_STATS_SINGLE_COLLISIONS 0x0000081c ++#define MAC_TX_STATS_MULT_COLLISIONS 0x00000820 ++#define MAC_TX_STATS_DEFERRED 0x00000824 ++#define MAC_TX_STATS_RESV3 0x00000828 ++#define MAC_TX_STATS_EXCESSIVE_COL 0x0000082c ++#define MAC_TX_STATS_LATE_COL 0x00000830 ++#define MAC_TX_STATS_RESV4_1 0x00000834 ++#define MAC_TX_STATS_RESV4_2 0x00000838 ++#define MAC_TX_STATS_RESV4_3 0x0000083c ++#define MAC_TX_STATS_RESV4_4 0x00000840 ++#define MAC_TX_STATS_RESV4_5 0x00000844 ++#define MAC_TX_STATS_RESV4_6 0x00000848 ++#define MAC_TX_STATS_RESV4_7 0x0000084c ++#define 
MAC_TX_STATS_RESV4_8 0x00000850 ++#define MAC_TX_STATS_RESV4_9 0x00000854 ++#define MAC_TX_STATS_RESV4_10 0x00000858 ++#define MAC_TX_STATS_RESV4_11 0x0000085c ++#define MAC_TX_STATS_RESV4_12 0x00000860 ++#define MAC_TX_STATS_RESV4_13 0x00000864 ++#define MAC_TX_STATS_RESV4_14 0x00000868 ++#define MAC_TX_STATS_UCAST 0x0000086c ++#define MAC_TX_STATS_MCAST 0x00000870 ++#define MAC_TX_STATS_BCAST 0x00000874 ++#define MAC_TX_STATS_RESV5_1 0x00000878 ++#define MAC_TX_STATS_RESV5_2 0x0000087c ++#define MAC_RX_STATS_OCTETS 0x00000880 ++#define MAC_RX_STATS_RESV1 0x00000884 ++#define MAC_RX_STATS_FRAGMENTS 0x00000888 ++#define MAC_RX_STATS_UCAST 0x0000088c ++#define MAC_RX_STATS_MCAST 0x00000890 ++#define MAC_RX_STATS_BCAST 0x00000894 ++#define MAC_RX_STATS_FCS_ERRORS 0x00000898 ++#define MAC_RX_STATS_ALIGN_ERRORS 0x0000089c ++#define MAC_RX_STATS_XON_PAUSE_RECVD 0x000008a0 ++#define MAC_RX_STATS_XOFF_PAUSE_RECVD 0x000008a4 ++#define MAC_RX_STATS_MAC_CTRL_RECVD 0x000008a8 ++#define MAC_RX_STATS_XOFF_ENTERED 0x000008ac ++#define MAC_RX_STATS_FRAME_TOO_LONG 0x000008b0 ++#define MAC_RX_STATS_JABBERS 0x000008b4 ++#define MAC_RX_STATS_UNDERSIZE 0x000008b8 ++/* 0x8bc --> 0xc00 unused */ ++ ++/* Send data initiator control registers */ ++#define SNDDATAI_MODE 0x00000c00 ++#define SNDDATAI_MODE_RESET 0x00000001 ++#define SNDDATAI_MODE_ENABLE 0x00000002 ++#define SNDDATAI_MODE_STAT_OFLOW_ENAB 0x00000004 ++#define SNDDATAI_STATUS 0x00000c04 ++#define SNDDATAI_STATUS_STAT_OFLOW 0x00000004 ++#define SNDDATAI_STATSCTRL 0x00000c08 ++#define SNDDATAI_SCTRL_ENABLE 0x00000001 ++#define SNDDATAI_SCTRL_FASTUPD 0x00000002 ++#define SNDDATAI_SCTRL_CLEAR 0x00000004 ++#define SNDDATAI_SCTRL_FLUSH 0x00000008 ++#define SNDDATAI_SCTRL_FORCE_ZERO 0x00000010 ++#define SNDDATAI_STATSENAB 0x00000c0c ++#define SNDDATAI_STATSINCMASK 0x00000c10 ++#define ISO_PKT_TX 0x00000c20 ++/* 0xc24 --> 0xc80 unused */ ++#define SNDDATAI_COS_CNT_0 0x00000c80 ++#define SNDDATAI_COS_CNT_1 0x00000c84 ++#define SNDDATAI_COS_CNT_2 0x00000c88 ++#define SNDDATAI_COS_CNT_3 0x00000c8c ++#define SNDDATAI_COS_CNT_4 0x00000c90 ++#define SNDDATAI_COS_CNT_5 0x00000c94 ++#define SNDDATAI_COS_CNT_6 0x00000c98 ++#define SNDDATAI_COS_CNT_7 0x00000c9c ++#define SNDDATAI_COS_CNT_8 0x00000ca0 ++#define SNDDATAI_COS_CNT_9 0x00000ca4 ++#define SNDDATAI_COS_CNT_10 0x00000ca8 ++#define SNDDATAI_COS_CNT_11 0x00000cac ++#define SNDDATAI_COS_CNT_12 0x00000cb0 ++#define SNDDATAI_COS_CNT_13 0x00000cb4 ++#define SNDDATAI_COS_CNT_14 0x00000cb8 ++#define SNDDATAI_COS_CNT_15 0x00000cbc ++#define SNDDATAI_DMA_RDQ_FULL_CNT 0x00000cc0 ++#define SNDDATAI_DMA_PRIO_RDQ_FULL_CNT 0x00000cc4 ++#define SNDDATAI_SDCQ_FULL_CNT 0x00000cc8 ++#define SNDDATAI_NICRNG_SSND_PIDX_CNT 0x00000ccc ++#define SNDDATAI_STATS_UPDATED_CNT 0x00000cd0 ++#define SNDDATAI_INTERRUPTS_CNT 0x00000cd4 ++#define SNDDATAI_AVOID_INTERRUPTS_CNT 0x00000cd8 ++#define SNDDATAI_SND_THRESH_HIT_CNT 0x00000cdc ++/* 0xce0 --> 0x1000 unused */ ++ ++/* Send data completion control registers */ ++#define SNDDATAC_MODE 0x00001000 ++#define SNDDATAC_MODE_RESET 0x00000001 ++#define SNDDATAC_MODE_ENABLE 0x00000002 ++#define SNDDATAC_MODE_CDELAY 0x00000010 ++/* 0x1004 --> 0x1400 unused */ ++ ++/* Send BD ring selector */ ++#define SNDBDS_MODE 0x00001400 ++#define SNDBDS_MODE_RESET 0x00000001 ++#define SNDBDS_MODE_ENABLE 0x00000002 ++#define SNDBDS_MODE_ATTN_ENABLE 0x00000004 ++#define SNDBDS_STATUS 0x00001404 ++#define SNDBDS_STATUS_ERROR_ATTN 0x00000004 ++#define SNDBDS_HWDIAG 0x00001408 ++/* 0x140c --> 0x1440 */ ++#define 
SNDBDS_SEL_CON_IDX_0 0x00001440 ++#define SNDBDS_SEL_CON_IDX_1 0x00001444 ++#define SNDBDS_SEL_CON_IDX_2 0x00001448 ++#define SNDBDS_SEL_CON_IDX_3 0x0000144c ++#define SNDBDS_SEL_CON_IDX_4 0x00001450 ++#define SNDBDS_SEL_CON_IDX_5 0x00001454 ++#define SNDBDS_SEL_CON_IDX_6 0x00001458 ++#define SNDBDS_SEL_CON_IDX_7 0x0000145c ++#define SNDBDS_SEL_CON_IDX_8 0x00001460 ++#define SNDBDS_SEL_CON_IDX_9 0x00001464 ++#define SNDBDS_SEL_CON_IDX_10 0x00001468 ++#define SNDBDS_SEL_CON_IDX_11 0x0000146c ++#define SNDBDS_SEL_CON_IDX_12 0x00001470 ++#define SNDBDS_SEL_CON_IDX_13 0x00001474 ++#define SNDBDS_SEL_CON_IDX_14 0x00001478 ++#define SNDBDS_SEL_CON_IDX_15 0x0000147c ++/* 0x1480 --> 0x1800 unused */ ++ ++/* Send BD initiator control registers */ ++#define SNDBDI_MODE 0x00001800 ++#define SNDBDI_MODE_RESET 0x00000001 ++#define SNDBDI_MODE_ENABLE 0x00000002 ++#define SNDBDI_MODE_ATTN_ENABLE 0x00000004 ++#define SNDBDI_MODE_MULTI_TXQ_EN 0x00000020 ++#define SNDBDI_STATUS 0x00001804 ++#define SNDBDI_STATUS_ERROR_ATTN 0x00000004 ++#define SNDBDI_IN_PROD_IDX_0 0x00001808 ++#define SNDBDI_IN_PROD_IDX_1 0x0000180c ++#define SNDBDI_IN_PROD_IDX_2 0x00001810 ++#define SNDBDI_IN_PROD_IDX_3 0x00001814 ++#define SNDBDI_IN_PROD_IDX_4 0x00001818 ++#define SNDBDI_IN_PROD_IDX_5 0x0000181c ++#define SNDBDI_IN_PROD_IDX_6 0x00001820 ++#define SNDBDI_IN_PROD_IDX_7 0x00001824 ++#define SNDBDI_IN_PROD_IDX_8 0x00001828 ++#define SNDBDI_IN_PROD_IDX_9 0x0000182c ++#define SNDBDI_IN_PROD_IDX_10 0x00001830 ++#define SNDBDI_IN_PROD_IDX_11 0x00001834 ++#define SNDBDI_IN_PROD_IDX_12 0x00001838 ++#define SNDBDI_IN_PROD_IDX_13 0x0000183c ++#define SNDBDI_IN_PROD_IDX_14 0x00001840 ++#define SNDBDI_IN_PROD_IDX_15 0x00001844 ++/* 0x1848 --> 0x1c00 unused */ ++ ++/* Send BD completion control registers */ ++#define SNDBDC_MODE 0x00001c00 ++#define SNDBDC_MODE_RESET 0x00000001 ++#define SNDBDC_MODE_ENABLE 0x00000002 ++#define SNDBDC_MODE_ATTN_ENABLE 0x00000004 ++/* 0x1c04 --> 0x2000 unused */ ++ ++/* Receive list placement control registers */ ++#define RCVLPC_MODE 0x00002000 ++#define RCVLPC_MODE_RESET 0x00000001 ++#define RCVLPC_MODE_ENABLE 0x00000002 ++#define RCVLPC_MODE_CLASS0_ATTN_ENAB 0x00000004 ++#define RCVLPC_MODE_MAPOOR_AATTN_ENAB 0x00000008 ++#define RCVLPC_MODE_STAT_OFLOW_ENAB 0x00000010 ++#define RCVLPC_STATUS 0x00002004 ++#define RCVLPC_STATUS_CLASS0 0x00000004 ++#define RCVLPC_STATUS_MAPOOR 0x00000008 ++#define RCVLPC_STATUS_STAT_OFLOW 0x00000010 ++#define RCVLPC_LOCK 0x00002008 ++#define RCVLPC_LOCK_REQ_MASK 0x0000ffff ++#define RCVLPC_LOCK_REQ_SHIFT 0 ++#define RCVLPC_LOCK_GRANT_MASK 0xffff0000 ++#define RCVLPC_LOCK_GRANT_SHIFT 16 ++#define RCVLPC_NON_EMPTY_BITS 0x0000200c ++#define RCVLPC_NON_EMPTY_BITS_MASK 0x0000ffff ++#define RCVLPC_CONFIG 0x00002010 ++#define RCVLPC_STATSCTRL 0x00002014 ++#define RCVLPC_STATSCTRL_ENABLE 0x00000001 ++#define RCVLPC_STATSCTRL_FASTUPD 0x00000002 ++#define RCVLPC_STATS_ENABLE 0x00002018 ++#define RCVLPC_STATSENAB_ASF_FIX 0x00000002 ++#define RCVLPC_STATSENAB_DACK_FIX 0x00040000 ++#define RCVLPC_STATSENAB_LNGBRST_RFIX 0x00400000 ++#define RCVLPC_STATS_INCMASK 0x0000201c ++/* 0x2020 --> 0x2100 unused */ ++#define RCVLPC_SELLST_BASE 0x00002100 /* 16 16-byte entries */ ++#define SELLST_TAIL 0x00000004 ++#define SELLST_CONT 0x00000008 ++#define SELLST_UNUSED 0x0000000c ++#define RCVLPC_COS_CNTL_BASE 0x00002200 /* 16 4-byte entries */ ++#define RCVLPC_DROP_FILTER_CNT 0x00002240 ++#define RCVLPC_DMA_WQ_FULL_CNT 0x00002244 ++#define RCVLPC_DMA_HIPRIO_WQ_FULL_CNT 0x00002248 ++#define 
RCVLPC_NO_RCV_BD_CNT 0x0000224c ++#define RCVLPC_IN_DISCARDS_CNT 0x00002250 ++#define RCVLPC_IN_ERRORS_CNT 0x00002254 ++#define RCVLPC_RCV_THRESH_HIT_CNT 0x00002258 ++/* 0x225c --> 0x2400 unused */ ++ ++/* Receive Data and Receive BD Initiator Control */ ++#define RCVDBDI_MODE 0x00002400 ++#define RCVDBDI_MODE_RESET 0x00000001 ++#define RCVDBDI_MODE_ENABLE 0x00000002 ++#define RCVDBDI_MODE_JUMBOBD_NEEDED 0x00000004 ++#define RCVDBDI_MODE_FRM_TOO_BIG 0x00000008 ++#define RCVDBDI_MODE_INV_RING_SZ 0x00000010 ++#define RCVDBDI_MODE_LRG_RING_SZ 0x00010000 ++#define RCVDBDI_STATUS 0x00002404 ++#define RCVDBDI_STATUS_JUMBOBD_NEEDED 0x00000004 ++#define RCVDBDI_STATUS_FRM_TOO_BIG 0x00000008 ++#define RCVDBDI_STATUS_INV_RING_SZ 0x00000010 ++#define RCVDBDI_SPLIT_FRAME_MINSZ 0x00002408 ++#define VRQ_STATUS 0x0000240c ++#define VRQ_FLUSH_CTRL 0x00002410 ++#define VRQ_FLUSH_ENABLE 0x00000001 ++#define VRQ_FLUSH_RESET_ENABLE 0x00000002 ++#define VRQ_FLUSH_STATUPDT_INT_ENABLE 0x00000004 ++#define VRQ_FLUSH_DISCARD_PKT_ENABLE 0x00000008 ++#define VRQ_FLUSH_SW_FLUSH 0x00000100 ++/* 0x2414 --> 0x2440 unused */ ++ ++#define RCVDBDI_JUMBO_BD 0x00002440 /* TG3_BDINFO_... */ ++#define RCVDBDI_STD_BD 0x00002450 /* TG3_BDINFO_... */ ++#define RCVDBDI_MINI_BD 0x00002460 /* TG3_BDINFO_... */ ++#define RCVDBDI_JUMBO_CON_IDX 0x00002470 ++#define RCVDBDI_STD_CON_IDX 0x00002474 ++#define RCVDBDI_MINI_CON_IDX 0x00002478 ++/* 0x247c --> 0x2480 unused */ ++#define RCVDBDI_BD_PROD_IDX_0 0x00002480 ++#define RCVDBDI_BD_PROD_IDX_1 0x00002484 ++#define RCVDBDI_BD_PROD_IDX_2 0x00002488 ++#define RCVDBDI_BD_PROD_IDX_3 0x0000248c ++#define RCVDBDI_BD_PROD_IDX_4 0x00002490 ++#define RCVDBDI_BD_PROD_IDX_5 0x00002494 ++#define RCVDBDI_BD_PROD_IDX_6 0x00002498 ++#define RCVDBDI_BD_PROD_IDX_7 0x0000249c ++#define RCVDBDI_BD_PROD_IDX_8 0x000024a0 ++#define RCVDBDI_BD_PROD_IDX_9 0x000024a4 ++#define RCVDBDI_BD_PROD_IDX_10 0x000024a8 ++#define RCVDBDI_BD_PROD_IDX_11 0x000024ac ++#define RCVDBDI_BD_PROD_IDX_12 0x000024b0 ++#define RCVDBDI_BD_PROD_IDX_13 0x000024b4 ++#define RCVDBDI_BD_PROD_IDX_14 0x000024b8 ++#define RCVDBDI_BD_PROD_IDX_15 0x000024bc ++#define RCVDBDI_HWDIAG 0x000024c0 ++/* 0x24c4 --> 0x2800 unused */ ++ ++#define RCVDBDI_JMB_BD_RING1 0x00002500 ++/* 0x2504 --> 0x2800 unused */ ++ ++/* Receive Data Completion Control */ ++#define RCVDCC_MODE 0x00002800 ++#define RCVDCC_MODE_RESET 0x00000001 ++#define RCVDCC_MODE_ENABLE 0x00000002 ++#define RCVDCC_MODE_ATTN_ENABLE 0x00000004 ++/* 0x2804 --> 0x2c00 unused */ ++ ++/* Receive BD Initiator Control Registers */ ++#define RCVBDI_MODE 0x00002c00 ++#define RCVBDI_MODE_RESET 0x00000001 ++#define RCVBDI_MODE_ENABLE 0x00000002 ++#define RCVBDI_MODE_RCB_ATTN_ENAB 0x00000004 ++#define RCVBDI_STATUS 0x00002c04 ++#define RCVBDI_STATUS_RCB_ATTN 0x00000004 ++#define RCVBDI_JUMBO_PROD_IDX 0x00002c08 ++#define RCVBDI_STD_PROD_IDX 0x00002c0c ++#define RCVBDI_MINI_PROD_IDX 0x00002c10 ++#define RCVBDI_MINI_THRESH 0x00002c14 ++#define RCVBDI_STD_THRESH 0x00002c18 ++#define RCVBDI_JUMBO_THRESH 0x00002c1c ++/* 0x2c20 --> 0x2d00 unused */ ++ ++#define STD_REPLENISH_LWM 0x00002d00 ++#define JMB_REPLENISH_LWM 0x00002d04 ++/* 0x2d08 --> 0x3000 unused */ ++ ++/* Receive BD Completion Control Registers */ ++#define RCVCC_MODE 0x00003000 ++#define RCVCC_MODE_RESET 0x00000001 ++#define RCVCC_MODE_ENABLE 0x00000002 ++#define RCVCC_MODE_ATTN_ENABLE 0x00000004 ++#define RCVCC_STATUS 0x00003004 ++#define RCVCC_STATUS_ERROR_ATTN 0x00000004 ++#define RCVCC_JUMP_PROD_IDX 0x00003008 ++#define RCVCC_STD_PROD_IDX 
0x0000300c ++#define RCVCC_MINI_PROD_IDX 0x00003010 ++/* 0x3014 --> 0x3400 unused */ ++ ++/* Receive list selector control registers */ ++#define RCVLSC_MODE 0x00003400 ++#define RCVLSC_MODE_RESET 0x00000001 ++#define RCVLSC_MODE_ENABLE 0x00000002 ++#define RCVLSC_MODE_ATTN_ENABLE 0x00000004 ++#define RCVLSC_STATUS 0x00003404 ++#define RCVLSC_STATUS_ERROR_ATTN 0x00000004 ++/* 0x3408 --> 0x3600 unused */ ++ ++#define TG3_CPMU_DRV_STATUS 0x0000344c ++ ++/* CPMU registers */ ++#define TG3_CPMU_CTRL 0x00003600 ++#define CPMU_CTRL_LINK_IDLE_MODE 0x00000200 ++#define CPMU_CTRL_LINK_AWARE_MODE 0x00000400 ++#define CPMU_CTRL_LINK_SPEED_MODE 0x00004000 ++#define CPMU_CTRL_GPHY_10MB_RXONLY 0x00010000 ++#define TG3_CPMU_LSPD_10MB_CLK 0x00003604 ++#define CPMU_LSPD_10MB_MACCLK_MASK 0x001f0000 ++#define CPMU_LSPD_10MB_MACCLK_6_25 0x00130000 ++/* 0x3608 --> 0x360c unused */ ++ ++#define TG3_CPMU_LSPD_1000MB_CLK 0x0000360c ++#define CPMU_LSPD_1000MB_MACCLK_62_5 0x00000000 ++#define CPMU_LSPD_1000MB_MACCLK_12_5 0x00110000 ++#define CPMU_LSPD_1000MB_MACCLK_MASK 0x001f0000 ++#define TG3_CPMU_LNK_AWARE_PWRMD 0x00003610 ++#define CPMU_LNK_AWARE_MACCLK_MASK 0x001f0000 ++#define CPMU_LNK_AWARE_MACCLK_6_25 0x00130000 ++/* 0x3614 --> 0x361c unused */ ++ ++#define TG3_CPMU_HST_ACC 0x0000361c ++#define CPMU_HST_ACC_MACCLK_MASK 0x001f0000 ++#define CPMU_HST_ACC_MACCLK_6_25 0x00130000 ++/* 0x3620 --> 0x3630 unused */ ++ ++#define TG3_CPMU_CLCK_ORIDE 0x00003624 ++#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000 ++ ++#define TG3_CPMU_CLCK_ORIDE_ENABLE 0x00003628 ++#define TG3_CPMU_MAC_ORIDE_ENABLE (1 << 13) ++ ++#define TG3_CPMU_STATUS 0x0000362c ++#define TG3_CPMU_STATUS_FMSK_5717 0x20000000 ++#define TG3_CPMU_STATUS_FMSK_5719 0xc0000000 ++#define TG3_CPMU_STATUS_FSHFT_5719 30 ++#define TG3_CPMU_STATUS_LINK_MASK 0x180000 ++ ++#define TG3_CPMU_CLCK_STAT 0x00003630 ++#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 ++#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 ++#define CPMU_CLCK_STAT_MAC_CLCK_12_5 0x00110000 ++#define CPMU_CLCK_STAT_MAC_CLCK_6_25 0x00130000 ++/* 0x3634 --> 0x365c unused */ ++ ++#define TG3_CPMU_MUTEX_REQ 0x0000365c ++#define CPMU_MUTEX_REQ_DRIVER 0x00001000 ++#define TG3_CPMU_MUTEX_GNT 0x00003660 ++#define CPMU_MUTEX_GNT_DRIVER 0x00001000 ++#define TG3_CPMU_PHY_STRAP 0x00003664 ++#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020 ++#define TG3_CPMU_PADRNG_CTL 0x00003668 ++#define TG3_CPMU_PADRNG_CTL_RDIV2 0x00040000 ++/* 0x3664 --> 0x36b0 unused */ ++ ++#define TG3_CPMU_EEE_MODE 0x000036b0 ++#define TG3_CPMU_EEEMD_APE_TX_DET_EN 0x00000004 ++#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008 ++#define TG3_CPMU_EEEMD_SND_IDX_DET_EN 0x00000040 ++#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080 ++#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100 ++#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200 ++#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000 ++#define TG3_CPMU_EEE_DBTMR1 0x000036b4 ++#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 ++#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000007ff ++#define TG3_CPMU_DBTMR1_LNKIDLE_MAX 0x0000ffff ++#define TG3_CPMU_EEE_DBTMR2 0x000036b8 ++#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000 ++#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000007ff ++#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc ++#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 ++#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004 ++#define TG3_CPMU_EEE_LNKIDL_APE_TX_MT 0x00000002 ++/* 0x36c0 --> 0x36d0 unused */ ++ ++#define TG3_CPMU_EEE_CTRL 0x000036d0 ++#define TG3_CPMU_EEE_CTRL_EXIT_16_5_US 0x0000019d ++#define 
TG3_CPMU_EEE_CTRL_EXIT_36_US 0x00000384 ++#define TG3_CPMU_EEE_CTRL_EXIT_20_1_US 0x000001f8 ++/* 0x36d4 --> 0x3800 unused */ ++ ++/* Mbuf cluster free registers */ ++#define MBFREE_MODE 0x00003800 ++#define MBFREE_MODE_RESET 0x00000001 ++#define MBFREE_MODE_ENABLE 0x00000002 ++#define MBFREE_STATUS 0x00003804 ++/* 0x3808 --> 0x3c00 unused */ ++ ++/* Host coalescing control registers */ ++#define HOSTCC_MODE 0x00003c00 ++#define HOSTCC_MODE_RESET 0x00000001 ++#define HOSTCC_MODE_ENABLE 0x00000002 ++#define HOSTCC_MODE_ATTN 0x00000004 ++#define HOSTCC_MODE_NOW 0x00000008 ++#define HOSTCC_MODE_FULL_STATUS 0x00000000 ++#define HOSTCC_MODE_64BYTE 0x00000080 ++#define HOSTCC_MODE_32BYTE 0x00000100 ++#define HOSTCC_MODE_CLRTICK_RXBD 0x00000200 ++#define HOSTCC_MODE_CLRTICK_TXBD 0x00000400 ++#define HOSTCC_MODE_NOINT_ON_NOW 0x00000800 ++#define HOSTCC_MODE_NOINT_ON_FORCE 0x00001000 ++#define HOSTCC_MODE_COAL_VEC1_NOW 0x00002000 ++#define HOSTCC_STATUS 0x00003c04 ++#define HOSTCC_STATUS_ERROR_ATTN 0x00000004 ++#define HOSTCC_RXCOL_TICKS 0x00003c08 ++#define LOW_RXCOL_TICKS 0x00000032 ++#if defined(__VMKLNX__) ++#define LOW_RXCOL_TICKS_CLRTCKS 0x00000012 ++#else ++#define LOW_RXCOL_TICKS_CLRTCKS 0x00000014 ++#endif ++#define DEFAULT_RXCOL_TICKS 0x00000048 ++#define HIGH_RXCOL_TICKS 0x00000096 ++#define MAX_RXCOL_TICKS 0x000003ff ++#define HOSTCC_TXCOL_TICKS 0x00003c0c ++#define LOW_TXCOL_TICKS 0x00000096 ++#define LOW_TXCOL_TICKS_CLRTCKS 0x00000048 ++#define DEFAULT_TXCOL_TICKS 0x0000012c ++#define HIGH_TXCOL_TICKS 0x00000145 ++#define MAX_TXCOL_TICKS 0x000003ff ++#define HOSTCC_RXMAX_FRAMES 0x00003c10 ++#if defined(__VMKLNX__) ++#define LOW_RXMAX_FRAMES 0x0000000f ++#else ++#define LOW_RXMAX_FRAMES 0x00000005 ++#endif ++#define DEFAULT_RXMAX_FRAMES 0x00000008 ++#define HIGH_RXMAX_FRAMES 0x00000012 ++#define MAX_RXMAX_FRAMES 0x000000ff ++#define HOSTCC_TXMAX_FRAMES 0x00003c14 ++#define LOW_TXMAX_FRAMES 0x00000035 ++#define DEFAULT_TXMAX_FRAMES 0x0000004b ++#define HIGH_TXMAX_FRAMES 0x00000052 ++#define MAX_TXMAX_FRAMES 0x000000ff ++#define HOSTCC_RXCOAL_TICK_INT 0x00003c18 ++#define DEFAULT_RXCOAL_TICK_INT 0x00000019 ++#define DEFAULT_RXCOAL_TICK_INT_CLRTCKS 0x00000014 ++#define MAX_RXCOAL_TICK_INT 0x000003ff ++#define HOSTCC_TXCOAL_TICK_INT 0x00003c1c ++#define DEFAULT_TXCOAL_TICK_INT 0x00000019 ++#define DEFAULT_TXCOAL_TICK_INT_CLRTCKS 0x00000014 ++#define MAX_TXCOAL_TICK_INT 0x000003ff ++#define HOSTCC_RXCOAL_MAXF_INT 0x00003c20 ++#define DEFAULT_RXCOAL_MAXF_INT 0x00000005 ++#define MAX_RXCOAL_MAXF_INT 0x000000ff ++#define HOSTCC_TXCOAL_MAXF_INT 0x00003c24 ++#define DEFAULT_TXCOAL_MAXF_INT 0x00000005 ++#define MAX_TXCOAL_MAXF_INT 0x000000ff ++#define HOSTCC_STAT_COAL_TICKS 0x00003c28 ++#define DEFAULT_STAT_COAL_TICKS 0x000f4240 ++#define MAX_STAT_COAL_TICKS 0xd693d400 ++#define MIN_STAT_COAL_TICKS 0x00000064 ++#define HOSTCC_PARAM_SET_RESET 0x00003c28 ++/* 0x3c2c --> 0x3c30 unused */ ++#define HOSTCC_STATS_BLK_HOST_ADDR 0x00003c30 /* 64-bit */ ++#define HOSTCC_STATUS_BLK_HOST_ADDR 0x00003c38 /* 64-bit */ ++#define HOSTCC_STATS_BLK_NIC_ADDR 0x00003c40 ++#define HOSTCC_STATUS_BLK_NIC_ADDR 0x00003c44 ++#define HOSTCC_FLOW_ATTN 0x00003c48 ++#define HOSTCC_FLOW_ATTN_MBUF_LWM 0x00000040 ++#define HOSTCC_FLOW_ATTN_RCB_MISCFG 0x00020000 ++#define HOSTCC_FLOW_ATTN_RCV_BDI_ATTN 0x00800000 ++/* 0x3c4c --> 0x3c50 unused */ ++#define HOSTCC_JUMBO_CON_IDX 0x00003c50 ++#define HOSTCC_STD_CON_IDX 0x00003c54 ++#define HOSTCC_MINI_CON_IDX 0x00003c58 ++/* 0x3c5c --> 0x3c80 unused */ ++#define HOSTCC_RET_PROD_IDX_0 
0x00003c80 ++#define HOSTCC_RET_PROD_IDX_1 0x00003c84 ++#define HOSTCC_RET_PROD_IDX_2 0x00003c88 ++#define HOSTCC_RET_PROD_IDX_3 0x00003c8c ++#define HOSTCC_RET_PROD_IDX_4 0x00003c90 ++#define HOSTCC_RET_PROD_IDX_5 0x00003c94 ++#define HOSTCC_RET_PROD_IDX_6 0x00003c98 ++#define HOSTCC_RET_PROD_IDX_7 0x00003c9c ++#define HOSTCC_RET_PROD_IDX_8 0x00003ca0 ++#define HOSTCC_RET_PROD_IDX_9 0x00003ca4 ++#define HOSTCC_RET_PROD_IDX_10 0x00003ca8 ++#define HOSTCC_RET_PROD_IDX_11 0x00003cac ++#define HOSTCC_RET_PROD_IDX_12 0x00003cb0 ++#define HOSTCC_RET_PROD_IDX_13 0x00003cb4 ++#define HOSTCC_RET_PROD_IDX_14 0x00003cb8 ++#define HOSTCC_RET_PROD_IDX_15 0x00003cbc ++#define HOSTCC_SND_CON_IDX_0 0x00003cc0 ++#define HOSTCC_SND_CON_IDX_1 0x00003cc4 ++#define HOSTCC_SND_CON_IDX_2 0x00003cc8 ++#define HOSTCC_SND_CON_IDX_3 0x00003ccc ++#define HOSTCC_SND_CON_IDX_4 0x00003cd0 ++#define HOSTCC_SND_CON_IDX_5 0x00003cd4 ++#define HOSTCC_SND_CON_IDX_6 0x00003cd8 ++#define HOSTCC_SND_CON_IDX_7 0x00003cdc ++#define HOSTCC_SND_CON_IDX_8 0x00003ce0 ++#define HOSTCC_SND_CON_IDX_9 0x00003ce4 ++#define HOSTCC_SND_CON_IDX_10 0x00003ce8 ++#define HOSTCC_SND_CON_IDX_11 0x00003cec ++#define HOSTCC_SND_CON_IDX_12 0x00003cf0 ++#define HOSTCC_SND_CON_IDX_13 0x00003cf4 ++#define HOSTCC_SND_CON_IDX_14 0x00003cf8 ++#define HOSTCC_SND_CON_IDX_15 0x00003cfc ++#define HOSTCC_STATBLCK_RING1 0x00003d00 ++/* 0x3d00 --> 0x3d80 unused */ ++ ++#define HOSTCC_RXCOL_TICKS_VEC1 0x00003d80 ++#define HOSTCC_TXCOL_TICKS_VEC1 0x00003d84 ++#define HOSTCC_RXMAX_FRAMES_VEC1 0x00003d88 ++#define HOSTCC_TXMAX_FRAMES_VEC1 0x00003d8c ++#define HOSTCC_RXCOAL_MAXF_INT_VEC1 0x00003d90 ++#define HOSTCC_TXCOAL_MAXF_INT_VEC1 0x00003d94 ++/* 0x3d98 --> 0x4000 unused */ ++ ++/* Memory arbiter control registers */ ++#define MEMARB_MODE 0x00004000 ++#define MEMARB_MODE_RESET 0x00000001 ++#define MEMARB_MODE_ENABLE 0x00000002 ++#define MEMARB_STATUS 0x00004004 ++#define MEMARB_TRAP_ADDR_LOW 0x00004008 ++#define MEMARB_TRAP_ADDR_HIGH 0x0000400c ++/* 0x4010 --> 0x4400 unused */ ++ ++/* Buffer manager control registers */ ++#define BUFMGR_MODE 0x00004400 ++#define BUFMGR_MODE_RESET 0x00000001 ++#define BUFMGR_MODE_ENABLE 0x00000002 ++#define BUFMGR_MODE_ATTN_ENABLE 0x00000004 ++#define BUFMGR_MODE_BM_TEST 0x00000008 ++#define BUFMGR_MODE_MBLOW_ATTN_ENAB 0x00000010 ++#define BUFMGR_MODE_NO_TX_UNDERRUN 0x80000000 ++#define BUFMGR_STATUS 0x00004404 ++#define BUFMGR_STATUS_ERROR 0x00000004 ++#define BUFMGR_STATUS_MBLOW 0x00000010 ++#define BUFMGR_MB_POOL_ADDR 0x00004408 ++#define BUFMGR_MB_POOL_SIZE 0x0000440c ++#define BUFMGR_MB_RDMA_LOW_WATER 0x00004410 ++#define DEFAULT_MB_RDMA_LOW_WATER 0x00000050 ++#define DEFAULT_MB_RDMA_LOW_WATER_5705 0x00000000 ++#define DEFAULT_MB_RDMA_LOW_WATER_JUMBO 0x00000130 ++#define DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780 0x00000000 ++#define BUFMGR_MB_MACRX_LOW_WATER 0x00004414 ++#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020 ++#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010 ++#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004 ++#define DEFAULT_MB_MACRX_LOW_WATER_57765 0x0000002a ++#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098 ++#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b ++#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765 0x0000007e ++#define BUFMGR_MB_HIGH_WATER 0x00004418 ++#define DEFAULT_MB_HIGH_WATER 0x00000060 ++#define DEFAULT_MB_HIGH_WATER_5705 0x00000060 ++#define DEFAULT_MB_HIGH_WATER_5906 0x00000010 ++#define DEFAULT_MB_HIGH_WATER_57765 0x000000a0 ++#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c ++#define 
DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096 ++#define DEFAULT_MB_HIGH_WATER_JUMBO_57765 0x000000ea ++#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c ++#define BUFMGR_MB_ALLOC_BIT 0x10000000 ++#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420 ++#define BUFMGR_TX_MB_ALLOC_REQ 0x00004424 ++#define BUFMGR_TX_MB_ALLOC_RESP 0x00004428 ++#define BUFMGR_DMA_DESC_POOL_ADDR 0x0000442c ++#define BUFMGR_DMA_DESC_POOL_SIZE 0x00004430 ++#define BUFMGR_DMA_LOW_WATER 0x00004434 ++#define DEFAULT_DMA_LOW_WATER 0x00000005 ++#define BUFMGR_DMA_HIGH_WATER 0x00004438 ++#define DEFAULT_DMA_HIGH_WATER 0x0000000a ++#define BUFMGR_RX_DMA_ALLOC_REQ 0x0000443c ++#define BUFMGR_RX_DMA_ALLOC_RESP 0x00004440 ++#define BUFMGR_TX_DMA_ALLOC_REQ 0x00004444 ++#define BUFMGR_TX_DMA_ALLOC_RESP 0x00004448 ++#define BUFMGR_HWDIAG_0 0x0000444c ++#define BUFMGR_HWDIAG_1 0x00004450 ++#define BUFMGR_HWDIAG_2 0x00004454 ++/* 0x4458 --> 0x4800 unused */ ++ ++/* Read DMA control registers */ ++#define RDMAC_MODE 0x00004800 ++#define RDMAC_MODE_RESET 0x00000001 ++#define RDMAC_MODE_ENABLE 0x00000002 ++#define RDMAC_MODE_TGTABORT_ENAB 0x00000004 ++#define RDMAC_MODE_MSTABORT_ENAB 0x00000008 ++#define RDMAC_MODE_PARITYERR_ENAB 0x00000010 ++#define RDMAC_MODE_ADDROFLOW_ENAB 0x00000020 ++#define RDMAC_MODE_FIFOOFLOW_ENAB 0x00000040 ++#define RDMAC_MODE_FIFOURUN_ENAB 0x00000080 ++#define RDMAC_MODE_FIFOOREAD_ENAB 0x00000100 ++#define RDMAC_MODE_LNGREAD_ENAB 0x00000200 ++#define RDMAC_MODE_SPLIT_ENABLE 0x00000800 ++#define RDMAC_MODE_BD_SBD_CRPT_ENAB 0x00000800 ++#define RDMAC_MODE_SPLIT_RESET 0x00001000 ++#define RDMAC_MODE_MBUF_RBD_CRPT_ENAB 0x00001000 ++#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000 ++#define RDMAC_MODE_FIFO_SIZE_128 0x00020000 ++#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000 ++#define RDMAC_MODE_JMB_2K_MMRR 0x00800000 ++#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000 ++#define RDMAC_MODE_IPV4_LSO_EN 0x08000000 ++#define RDMAC_MODE_IPV6_LSO_EN 0x10000000 ++#define RDMAC_MODE_H2BNC_VLAN_DET 0x20000000 ++#define RDMAC_STATUS 0x00004804 ++#define RDMAC_STATUS_TGTABORT 0x00000004 ++#define RDMAC_STATUS_MSTABORT 0x00000008 ++#define RDMAC_STATUS_PARITYERR 0x00000010 ++#define RDMAC_STATUS_ADDROFLOW 0x00000020 ++#define RDMAC_STATUS_FIFOOFLOW 0x00000040 ++#define RDMAC_STATUS_FIFOURUN 0x00000080 ++#define RDMAC_STATUS_FIFOOREAD 0x00000100 ++#define RDMAC_STATUS_LNGREAD 0x00000200 ++/* 0x4808 --> 0x4900 unused */ ++ ++#define TG3_RDMA_RSRVCTRL_REG2 0x00004890 ++#define TG3_LSO_RD_DMA_CRPTEN_CTRL2 0x000048a0 ++ ++#define TG3_RDMA_RSRVCTRL_REG 0x00004900 ++#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 ++#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00 ++#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0 ++#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000 ++#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000 ++#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000 ++#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000 ++/* 0x4904 --> 0x4910 unused */ ++ ++#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910 ++#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000 ++#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000 ++#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5719 0x02000000 ++#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5720 0x00200000 ++/* 0x4914 --> 0x4be0 unused */ ++ ++#define TG3_NUM_RDMA_CHANNELS 4 ++#define TG3_RDMA_LENGTH 0x00004be0 ++ ++/* Write DMA control registers */ ++#define WDMAC_MODE 0x00004c00 ++#define WDMAC_MODE_RESET 0x00000001 ++#define WDMAC_MODE_ENABLE 0x00000002 ++#define WDMAC_MODE_TGTABORT_ENAB 0x00000004 ++#define 
WDMAC_MODE_MSTABORT_ENAB 0x00000008 ++#define WDMAC_MODE_PARITYERR_ENAB 0x00000010 ++#define WDMAC_MODE_ADDROFLOW_ENAB 0x00000020 ++#define WDMAC_MODE_FIFOOFLOW_ENAB 0x00000040 ++#define WDMAC_MODE_FIFOURUN_ENAB 0x00000080 ++#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100 ++#define WDMAC_MODE_LNGREAD_ENAB 0x00000200 ++#define WDMAC_MODE_RX_ACCEL 0x00000400 ++#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000 ++#define WDMAC_MODE_BURST_ALL_DATA 0xc0000000 ++#define WDMAC_STATUS 0x00004c04 ++#define WDMAC_STATUS_TGTABORT 0x00000004 ++#define WDMAC_STATUS_MSTABORT 0x00000008 ++#define WDMAC_STATUS_PARITYERR 0x00000010 ++#define WDMAC_STATUS_ADDROFLOW 0x00000020 ++#define WDMAC_STATUS_FIFOOFLOW 0x00000040 ++#define WDMAC_STATUS_FIFOURUN 0x00000080 ++#define WDMAC_STATUS_FIFOOREAD 0x00000100 ++#define WDMAC_STATUS_LNGREAD 0x00000200 ++/* 0x4c08 --> 0x5000 unused */ ++ ++/* Per-cpu register offsets (arm9) */ ++#define CPU_MODE 0x00000000 ++#define CPU_MODE_RESET 0x00000001 ++#define CPU_MODE_HALT 0x00000400 ++#define CPU_STATE 0x00000004 ++#define CPU_EVTMASK 0x00000008 ++/* 0xc --> 0x1c reserved */ ++#define CPU_PC 0x0000001c ++#define CPU_INSN 0x00000020 ++#define CPU_SPAD_UFLOW 0x00000024 ++#define CPU_WDOG_CLEAR 0x00000028 ++#define CPU_WDOG_VECTOR 0x0000002c ++#define CPU_WDOG_PC 0x00000030 ++#define CPU_HW_BP 0x00000034 ++/* 0x38 --> 0x44 unused */ ++#define CPU_WDOG_SAVED_STATE 0x00000044 ++#define CPU_LAST_BRANCH_ADDR 0x00000048 ++#define CPU_SPAD_UFLOW_SET 0x0000004c ++/* 0x50 --> 0x200 unused */ ++#define CPU_R0 0x00000200 ++#define CPU_R1 0x00000204 ++#define CPU_R2 0x00000208 ++#define CPU_R3 0x0000020c ++#define CPU_R4 0x00000210 ++#define CPU_R5 0x00000214 ++#define CPU_R6 0x00000218 ++#define CPU_R7 0x0000021c ++#define CPU_R8 0x00000220 ++#define CPU_R9 0x00000224 ++#define CPU_R10 0x00000228 ++#define CPU_R11 0x0000022c ++#define CPU_R12 0x00000230 ++#define CPU_R13 0x00000234 ++#define CPU_R14 0x00000238 ++#define CPU_R15 0x0000023c ++#define CPU_R16 0x00000240 ++#define CPU_R17 0x00000244 ++#define CPU_R18 0x00000248 ++#define CPU_R19 0x0000024c ++#define CPU_R20 0x00000250 ++#define CPU_R21 0x00000254 ++#define CPU_R22 0x00000258 ++#define CPU_R23 0x0000025c ++#define CPU_R24 0x00000260 ++#define CPU_R25 0x00000264 ++#define CPU_R26 0x00000268 ++#define CPU_R27 0x0000026c ++#define CPU_R28 0x00000270 ++#define CPU_R29 0x00000274 ++#define CPU_R30 0x00000278 ++#define CPU_R31 0x0000027c ++/* 0x280 --> 0x400 unused */ ++ ++#define RX_CPU_BASE 0x00005000 ++#define RX_CPU_MODE 0x00005000 ++#define RX_CPU_STATE 0x00005004 ++#define RX_CPU_PGMCTR 0x0000501c ++#define RX_CPU_HWBKPT 0x00005034 ++#define TX_CPU_BASE 0x00005400 ++#define TX_CPU_MODE 0x00005400 ++#define TX_CPU_STATE 0x00005404 ++#define TX_CPU_PGMCTR 0x0000541c ++ ++#define VCPU_STATUS 0x00005100 ++#define VCPU_STATUS_INIT_DONE 0x04000000 ++#define VCPU_STATUS_DRV_RESET 0x08000000 ++ ++#define VCPU_CFGSHDW 0x00005104 ++#define VCPU_CFGSHDW_WOL_ENABLE 0x00000001 ++#define VCPU_CFGSHDW_WOL_MAGPKT 0x00000004 ++#define VCPU_CFGSHDW_ASPM_DBNC 0x00001000 ++ ++#define MAC_VRQFLT_CFG 0x00005400 ++#define MAC_VRQFLT_ELEM_EN 0x80000000 ++#define MAC_VRQFLT_HDR_VLAN 0x0000e000 ++#define MAC_VRQFLT_PTRN 0x00005480 ++#define MAC_VRQFLT_PTRN_VLANID 0x0000ffff ++#define MAC_VRQFLT_FLTSET 0x00005500 ++ ++/* Mailboxes */ ++#define GRCMBOX_BASE 0x00005600 ++#define MAC_VRQMAP_1H 0x00005600 ++#define MAC_VRQMAP_1H_PTA_PFEN 0x00000020 ++#define MAC_VRQMAP_2H 0x00005604 ++#define MAC_VRQMAP_2H_PTA_VFEN 0x00000020 ++#define MAC_VRQMAP_2H_PTA_AND 
0x00000000 ++#define MAC_VRQMAP_2H_PTA_OR 0x00000040 ++#define MAC_VRQMAP_2H_PTA_EN 0x00000080 ++#define MAC_VRQ_PMATCH_HI_5 0x00005690 ++#define MAC_VRQ_PMATCH_LO_5 0x00005694 ++#define GRCMBOX_INTERRUPT_0 0x00005800 /* 64-bit */ ++#define GRCMBOX_INTERRUPT_1 0x00005808 /* 64-bit */ ++#define GRCMBOX_INTERRUPT_2 0x00005810 /* 64-bit */ ++#define GRCMBOX_INTERRUPT_3 0x00005818 /* 64-bit */ ++#define GRCMBOX_GENERAL_0 0x00005820 /* 64-bit */ ++#define GRCMBOX_GENERAL_1 0x00005828 /* 64-bit */ ++#define GRCMBOX_GENERAL_2 0x00005830 /* 64-bit */ ++#define GRCMBOX_GENERAL_3 0x00005838 /* 64-bit */ ++#define GRCMBOX_GENERAL_4 0x00005840 /* 64-bit */ ++#define GRCMBOX_GENERAL_5 0x00005848 /* 64-bit */ ++#define GRCMBOX_GENERAL_6 0x00005850 /* 64-bit */ ++#define GRCMBOX_GENERAL_7 0x00005858 /* 64-bit */ ++#define GRCMBOX_RELOAD_STAT 0x00005860 /* 64-bit */ ++#define GRCMBOX_RCVSTD_PROD_IDX 0x00005868 /* 64-bit */ ++#define GRCMBOX_RCVJUMBO_PROD_IDX 0x00005870 /* 64-bit */ ++#define GRCMBOX_RCVMINI_PROD_IDX 0x00005878 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_0 0x00005880 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_1 0x00005888 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_2 0x00005890 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_3 0x00005898 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_4 0x000058a0 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_5 0x000058a8 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_6 0x000058b0 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_7 0x000058b8 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_8 0x000058c0 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_9 0x000058c8 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_10 0x000058d0 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_11 0x000058d8 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_12 0x000058e0 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_13 0x000058e8 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_14 0x000058f0 /* 64-bit */ ++#define GRCMBOX_RCVRET_CON_IDX_15 0x000058f8 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_0 0x00005900 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_1 0x00005908 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_2 0x00005910 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_3 0x00005918 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_4 0x00005920 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_5 0x00005928 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_6 0x00005930 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_7 0x00005938 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_8 0x00005940 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_9 0x00005948 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_10 0x00005950 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_11 0x00005958 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_12 0x00005960 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_13 0x00005968 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_14 0x00005970 /* 64-bit */ ++#define GRCMBOX_SNDHOST_PROD_IDX_15 0x00005978 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_0 0x00005980 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_1 0x00005988 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_2 0x00005990 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_3 0x00005998 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_4 0x000059a0 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_5 0x000059a8 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_6 0x000059b0 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_7 0x000059b8 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_8 0x000059c0 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_9 
0x000059c8 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_10 0x000059d0 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_11 0x000059d8 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_12 0x000059e0 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_13 0x000059e8 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_14 0x000059f0 /* 64-bit */ ++#define GRCMBOX_SNDNIC_PROD_IDX_15 0x000059f8 /* 64-bit */ ++#define GRCMBOX_HIGH_PRIO_EV_VECTOR 0x00005a00 ++#define GRCMBOX_HIGH_PRIO_EV_MASK 0x00005a04 ++#define GRCMBOX_LOW_PRIO_EV_VEC 0x00005a08 ++#define GRCMBOX_LOW_PRIO_EV_MASK 0x00005a0c ++/* 0x5a10 --> 0x5c00 */ ++ ++/* Flow Through queues */ ++#define FTQ_RESET 0x00005c00 ++/* 0x5c04 --> 0x5c10 unused */ ++#define FTQ_DMA_NORM_READ_CTL 0x00005c10 ++#define FTQ_DMA_NORM_READ_FULL_CNT 0x00005c14 ++#define FTQ_DMA_NORM_READ_FIFO_ENQDEQ 0x00005c18 ++#define FTQ_DMA_NORM_READ_WRITE_PEEK 0x00005c1c ++#define FTQ_DMA_HIGH_READ_CTL 0x00005c20 ++#define FTQ_DMA_HIGH_READ_FULL_CNT 0x00005c24 ++#define FTQ_DMA_HIGH_READ_FIFO_ENQDEQ 0x00005c28 ++#define FTQ_DMA_HIGH_READ_WRITE_PEEK 0x00005c2c ++#define FTQ_DMA_COMP_DISC_CTL 0x00005c30 ++#define FTQ_DMA_COMP_DISC_FULL_CNT 0x00005c34 ++#define FTQ_DMA_COMP_DISC_FIFO_ENQDEQ 0x00005c38 ++#define FTQ_DMA_COMP_DISC_WRITE_PEEK 0x00005c3c ++#define FTQ_SEND_BD_COMP_CTL 0x00005c40 ++#define FTQ_SEND_BD_COMP_FULL_CNT 0x00005c44 ++#define FTQ_SEND_BD_COMP_FIFO_ENQDEQ 0x00005c48 ++#define FTQ_SEND_BD_COMP_WRITE_PEEK 0x00005c4c ++#define FTQ_SEND_DATA_INIT_CTL 0x00005c50 ++#define FTQ_SEND_DATA_INIT_FULL_CNT 0x00005c54 ++#define FTQ_SEND_DATA_INIT_FIFO_ENQDEQ 0x00005c58 ++#define FTQ_SEND_DATA_INIT_WRITE_PEEK 0x00005c5c ++#define FTQ_DMA_NORM_WRITE_CTL 0x00005c60 ++#define FTQ_DMA_NORM_WRITE_FULL_CNT 0x00005c64 ++#define FTQ_DMA_NORM_WRITE_FIFO_ENQDEQ 0x00005c68 ++#define FTQ_DMA_NORM_WRITE_WRITE_PEEK 0x00005c6c ++#define FTQ_DMA_HIGH_WRITE_CTL 0x00005c70 ++#define FTQ_DMA_HIGH_WRITE_FULL_CNT 0x00005c74 ++#define FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ 0x00005c78 ++#define FTQ_DMA_HIGH_WRITE_WRITE_PEEK 0x00005c7c ++#define FTQ_SWTYPE1_CTL 0x00005c80 ++#define FTQ_SWTYPE1_FULL_CNT 0x00005c84 ++#define FTQ_SWTYPE1_FIFO_ENQDEQ 0x00005c88 ++#define FTQ_SWTYPE1_WRITE_PEEK 0x00005c8c ++#define FTQ_SEND_DATA_COMP_CTL 0x00005c90 ++#define FTQ_SEND_DATA_COMP_FULL_CNT 0x00005c94 ++#define FTQ_SEND_DATA_COMP_FIFO_ENQDEQ 0x00005c98 ++#define FTQ_SEND_DATA_COMP_WRITE_PEEK 0x00005c9c ++#define FTQ_HOST_COAL_CTL 0x00005ca0 ++#define FTQ_HOST_COAL_FULL_CNT 0x00005ca4 ++#define FTQ_HOST_COAL_FIFO_ENQDEQ 0x00005ca8 ++#define FTQ_HOST_COAL_WRITE_PEEK 0x00005cac ++#define FTQ_MAC_TX_CTL 0x00005cb0 ++#define FTQ_MAC_TX_FULL_CNT 0x00005cb4 ++#define FTQ_MAC_TX_FIFO_ENQDEQ 0x00005cb8 ++#define FTQ_MAC_TX_WRITE_PEEK 0x00005cbc ++#define FTQ_MB_FREE_CTL 0x00005cc0 ++#define FTQ_MB_FREE_FULL_CNT 0x00005cc4 ++#define FTQ_MB_FREE_FIFO_ENQDEQ 0x00005cc8 ++#define FTQ_MB_FREE_WRITE_PEEK 0x00005ccc ++#define FTQ_RCVBD_COMP_CTL 0x00005cd0 ++#define FTQ_RCVBD_COMP_FULL_CNT 0x00005cd4 ++#define FTQ_RCVBD_COMP_FIFO_ENQDEQ 0x00005cd8 ++#define FTQ_RCVBD_COMP_WRITE_PEEK 0x00005cdc ++#define FTQ_RCVLST_PLMT_CTL 0x00005ce0 ++#define FTQ_RCVLST_PLMT_FULL_CNT 0x00005ce4 ++#define FTQ_RCVLST_PLMT_FIFO_ENQDEQ 0x00005ce8 ++#define FTQ_RCVLST_PLMT_WRITE_PEEK 0x00005cec ++#define FTQ_RCVDATA_INI_CTL 0x00005cf0 ++#define FTQ_RCVDATA_INI_FULL_CNT 0x00005cf4 ++#define FTQ_RCVDATA_INI_FIFO_ENQDEQ 0x00005cf8 ++#define FTQ_RCVDATA_INI_WRITE_PEEK 0x00005cfc ++#define FTQ_RCVDATA_COMP_CTL 0x00005d00 ++#define FTQ_RCVDATA_COMP_FULL_CNT 
0x00005d04 ++#define FTQ_RCVDATA_COMP_FIFO_ENQDEQ 0x00005d08 ++#define FTQ_RCVDATA_COMP_WRITE_PEEK 0x00005d0c ++#define FTQ_SWTYPE2_CTL 0x00005d10 ++#define FTQ_SWTYPE2_FULL_CNT 0x00005d14 ++#define FTQ_SWTYPE2_FIFO_ENQDEQ 0x00005d18 ++#define FTQ_SWTYPE2_WRITE_PEEK 0x00005d1c ++/* 0x5d20 --> 0x6000 unused */ ++ ++/* Message signaled interrupt registers */ ++#define MSGINT_MODE 0x00006000 ++#define MSGINT_MODE_RESET 0x00000001 ++#define MSGINT_MODE_ENABLE 0x00000002 ++#define MSGINT_MODE_ONE_SHOT_DISABLE 0x00000020 ++#define MSGINT_MODE_MULTIVEC_EN 0x00000080 ++#define MSGINT_STATUS 0x00006004 ++#define MSGINT_STATUS_MSI_REQ 0x00000001 ++#define MSGINT_FIFO 0x00006008 ++/* 0x600c --> 0x6400 unused */ ++ ++/* DMA completion registers */ ++#define DMAC_MODE 0x00006400 ++#define DMAC_MODE_RESET 0x00000001 ++#define DMAC_MODE_ENABLE 0x00000002 ++/* 0x6404 --> 0x6800 unused */ ++ ++/* GRC registers */ ++#define GRC_MODE 0x00006800 ++#define GRC_MODE_UPD_ON_COAL 0x00000001 ++#define GRC_MODE_BSWAP_NONFRM_DATA 0x00000002 ++#define GRC_MODE_WSWAP_NONFRM_DATA 0x00000004 ++#define GRC_MODE_BSWAP_DATA 0x00000010 ++#define GRC_MODE_WSWAP_DATA 0x00000020 ++#define GRC_MODE_BYTE_SWAP_B2HRX_DATA 0x00000040 ++#define GRC_MODE_WORD_SWAP_B2HRX_DATA 0x00000080 ++#define GRC_MODE_IOV_ENABLE 0x00000100 ++#define GRC_MODE_SPLITHDR 0x00000100 ++#define GRC_MODE_NOFRM_CRACKING 0x00000200 ++#define GRC_MODE_INCL_CRC 0x00000400 ++#define GRC_MODE_ALLOW_BAD_FRMS 0x00000800 ++#define GRC_MODE_NOIRQ_ON_SENDS 0x00002000 ++#define GRC_MODE_NOIRQ_ON_RCV 0x00004000 ++#define GRC_MODE_FORCE_PCI32BIT 0x00008000 ++#define GRC_MODE_B2HRX_ENABLE 0x00008000 ++#define GRC_MODE_HOST_STACKUP 0x00010000 ++#define GRC_MODE_HOST_SENDBDS 0x00020000 ++#define GRC_MODE_HTX2B_ENABLE 0x00040000 ++#define GRC_MODE_TIME_SYNC_ENABLE 0x00080000 ++#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000 ++#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000 ++#define GRC_MODE_PCIE_TL_SEL 0x00000000 ++#define GRC_MODE_PCIE_PL_SEL 0x00400000 ++#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000 ++#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000 ++#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000 ++#define GRC_MODE_IRQ_ON_MAC_ATTN 0x04000000 ++#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000 ++#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000 ++#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000 ++#define GRC_MODE_PCIE_DL_SEL 0x20000000 ++#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000 ++#define GRC_MODE_PCIE_HI_1K_EN 0x80000000 ++#define GRC_MODE_PCIE_PORT_MASK (GRC_MODE_PCIE_TL_SEL | \ ++ GRC_MODE_PCIE_PL_SEL | \ ++ GRC_MODE_PCIE_DL_SEL | \ ++ GRC_MODE_PCIE_HI_1K_EN) ++#define GRC_MISC_CFG 0x00006804 ++#define GRC_MISC_CFG_CORECLK_RESET 0x00000001 ++#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe ++#define GRC_MISC_CFG_PRESCALAR_SHIFT 1 ++#define GRC_MISC_CFG_BOARD_ID_MASK 0x0001e000 ++#define GRC_MISC_CFG_BOARD_ID_5700 0x0001e000 ++#define GRC_MISC_CFG_BOARD_ID_5701 0x00000000 ++#define GRC_MISC_CFG_BOARD_ID_5702FE 0x00004000 ++#define GRC_MISC_CFG_BOARD_ID_5703 0x00000000 ++#define GRC_MISC_CFG_BOARD_ID_5703S 0x00002000 ++#define GRC_MISC_CFG_BOARD_ID_5704 0x00000000 ++#define GRC_MISC_CFG_BOARD_ID_5704CIOBE 0x00004000 ++#define GRC_MISC_CFG_BOARD_ID_5704_A2 0x00008000 ++#define GRC_MISC_CFG_BOARD_ID_5788 0x00010000 ++#define GRC_MISC_CFG_BOARD_ID_5788M 0x00018000 ++#define GRC_MISC_CFG_BOARD_ID_AC91002A1 0x00018000 ++#define GRC_MISC_CFG_EPHY_IDDQ 0x00200000 ++#define GRC_MISC_CFG_KEEP_GPHY_POWER 0x04000000 ++#define GRC_LOCAL_CTRL 0x00006808 ++#define GRC_LCLCTRL_INT_ACTIVE 0x00000001 ++#define 
GRC_LCLCTRL_CLEARINT 0x00000002 ++#define GRC_LCLCTRL_SETINT 0x00000004 ++#define GRC_LCLCTRL_INT_ON_ATTN 0x00000008 ++#define GRC_LCLCTRL_GPIO_UART_SEL 0x00000010 /* 5755 only */ ++#define GRC_LCLCTRL_USE_SIG_DETECT 0x00000010 /* 5714/5780 only */ ++#define GRC_LCLCTRL_USE_EXT_SIG_DETECT 0x00000020 /* 5714/5780 only */ ++#define GRC_LCLCTRL_GPIO_INPUT3 0x00000020 ++#define GRC_LCLCTRL_GPIO_OE3 0x00000040 ++#define GRC_LCLCTRL_GPIO_OUTPUT3 0x00000080 ++#define GRC_LCLCTRL_GPIO_INPUT0 0x00000100 ++#define GRC_LCLCTRL_GPIO_INPUT1 0x00000200 ++#define GRC_LCLCTRL_GPIO_INPUT2 0x00000400 ++#define GRC_LCLCTRL_GPIO_OE0 0x00000800 ++#define GRC_LCLCTRL_GPIO_OE1 0x00001000 ++#define GRC_LCLCTRL_GPIO_OE2 0x00002000 ++#define GRC_LCLCTRL_GPIO_OUTPUT0 0x00004000 ++#define GRC_LCLCTRL_GPIO_OUTPUT1 0x00008000 ++#define GRC_LCLCTRL_GPIO_OUTPUT2 0x00010000 ++#define GRC_LCLCTRL_EXTMEM_ENABLE 0x00020000 ++#define GRC_LCLCTRL_MEMSZ_MASK 0x001c0000 ++#define GRC_LCLCTRL_MEMSZ_256K 0x00000000 ++#define GRC_LCLCTRL_MEMSZ_512K 0x00040000 ++#define GRC_LCLCTRL_MEMSZ_1M 0x00080000 ++#define GRC_LCLCTRL_MEMSZ_2M 0x000c0000 ++#define GRC_LCLCTRL_MEMSZ_4M 0x00100000 ++#define GRC_LCLCTRL_MEMSZ_8M 0x00140000 ++#define GRC_LCLCTRL_MEMSZ_16M 0x00180000 ++#define GRC_LCLCTRL_BANK_SELECT 0x00200000 ++#define GRC_LCLCTRL_SSRAM_TYPE 0x00400000 ++#define GRC_LCLCTRL_AUTO_SEEPROM 0x01000000 ++#define GRC_TIMER 0x0000680c ++#define GRC_RX_CPU_EVENT 0x00006810 ++#define GRC_RX_CPU_DRIVER_EVENT 0x00004000 ++#define GRC_RX_TIMER_REF 0x00006814 ++#define GRC_RX_CPU_SEM 0x00006818 ++#define GRC_REMOTE_RX_CPU_ATTN 0x0000681c ++#define GRC_TX_CPU_EVENT 0x00006820 ++#define GRC_TX_TIMER_REF 0x00006824 ++#define GRC_TX_CPU_SEM 0x00006828 ++#define GRC_REMOTE_TX_CPU_ATTN 0x0000682c ++#define GRC_MEM_POWER_UP 0x00006830 /* 64-bit */ ++#define GRC_EEPROM_ADDR 0x00006838 ++#define EEPROM_ADDR_WRITE 0x00000000 ++#define EEPROM_ADDR_READ 0x80000000 ++#define EEPROM_ADDR_COMPLETE 0x40000000 ++#define EEPROM_ADDR_FSM_RESET 0x20000000 ++#define EEPROM_ADDR_DEVID_MASK 0x1c000000 ++#define EEPROM_ADDR_DEVID_SHIFT 26 ++#define EEPROM_ADDR_START 0x02000000 ++#define EEPROM_ADDR_CLKPERD_SHIFT 16 ++#define EEPROM_ADDR_ADDR_MASK 0x0000ffff ++#define EEPROM_ADDR_ADDR_SHIFT 0 ++#define EEPROM_DEFAULT_CLOCK_PERIOD 0x60 ++#define EEPROM_CHIP_SIZE (64 * 1024) ++#define GRC_EEPROM_DATA 0x0000683c ++#define GRC_EEPROM_CTRL 0x00006840 ++#define GRC_MDI_CTRL 0x00006844 ++#define GRC_SEEPROM_DELAY 0x00006848 ++/* 0x684c --> 0x6890 unused */ ++#define GRC_VCPU_EXT_CTRL 0x00006890 ++#define GRC_VCPU_EXT_CTRL_HALT_CPU 0x00400000 ++#define GRC_VCPU_EXT_CTRL_DISABLE_WOL 0x20000000 ++#define GRC_FASTBOOT_PC 0x00006894 /* 5752, 5755, 5787 */ ++ ++#define TG3_EAV_REF_CLCK_LSB 0x00006900 ++#define TG3_EAV_REF_CLCK_MSB 0x00006904 ++#define TG3_EAV_REF_CLCK_CTL 0x00006908 ++#define TG3_EAV_REF_CLCK_CTL_STOP 0x00000002 ++#define TG3_EAV_REF_CLCK_CTL_RESUME 0x00000004 ++#define TG3_EAV_CTL_TSYNC_GPIO_MASK (0x3 << 16) ++#define TG3_EAV_CTL_TSYNC_WDOG0 (1 << 17) ++#define TG3_EAV_REF_CLK_CORRECT_CTL 0x00006928 ++#define TG3_EAV_REF_CLK_CORRECT_EN (1 << 31) ++#define TG3_EAV_REF_CLK_CORRECT_NEG (1 << 30) ++ ++#define TG3_EAV_REF_CLK_CORRECT_MASK 0xffffff ++ ++#define TG3_EAV_WATCHDOG0_LSB 0x00006918 ++#define TG3_EAV_WATCHDOG0_MSB 0x0000691c ++#define TG3_EAV_WATCHDOG0_EN (1 << 31) ++#define TG3_EAV_WATCHDOG_MSB_MASK 0x7fffffff ++/* 0x690c --> 0x7000 unused */ ++ ++/* NVRAM Control registers */ ++#define NVRAM_CMD 0x00007000 ++#define NVRAM_CMD_RESET 0x00000001 ++#define 
NVRAM_CMD_DONE 0x00000008 ++#define NVRAM_CMD_GO 0x00000010 ++#define NVRAM_CMD_WR 0x00000020 ++#define NVRAM_CMD_RD 0x00000000 ++#define NVRAM_CMD_ERASE 0x00000040 ++#define NVRAM_CMD_FIRST 0x00000080 ++#define NVRAM_CMD_LAST 0x00000100 ++#define NVRAM_CMD_WREN 0x00010000 ++#define NVRAM_CMD_WRDI 0x00020000 ++#define NVRAM_STAT 0x00007004 ++#define NVRAM_WRDATA 0x00007008 ++#define NVRAM_ADDR 0x0000700c ++#define NVRAM_ADDR_MSK 0x07ffffff ++#define NVRAM_RDDATA 0x00007010 ++#define NVRAM_CFG1 0x00007014 ++#define NVRAM_CFG1_FLASHIF_ENAB 0x00000001 ++#define NVRAM_CFG1_BUFFERED_MODE 0x00000002 ++#define NVRAM_CFG1_PASS_THRU 0x00000004 ++#define NVRAM_CFG1_STATUS_BITS 0x00000070 ++#define NVRAM_CFG1_BIT_BANG 0x00000008 ++#define NVRAM_CFG1_FLASH_SIZE 0x02000000 ++#define NVRAM_CFG1_COMPAT_BYPASS 0x80000000 ++#define NVRAM_CFG1_VENDOR_MASK 0x03000003 ++#define FLASH_VENDOR_ATMEL_EEPROM 0x02000000 ++#define FLASH_VENDOR_ATMEL_FLASH_BUFFERED 0x02000003 ++#define FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED 0x00000003 ++#define FLASH_VENDOR_ST 0x03000001 ++#define FLASH_VENDOR_SAIFUN 0x01000003 ++#define FLASH_VENDOR_SST_SMALL 0x00000001 ++#define FLASH_VENDOR_SST_LARGE 0x02000001 ++#define NVRAM_CFG1_5752VENDOR_MASK 0x03c00003 ++#define NVRAM_CFG1_5762VENDOR_MASK 0x03e00003 ++#define FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ 0x00000000 ++#define FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ 0x02000000 ++#define FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED 0x02000003 ++#define FLASH_5752VENDOR_ST_M45PE10 0x02400000 ++#define FLASH_5752VENDOR_ST_M45PE20 0x02400002 ++#define FLASH_5752VENDOR_ST_M45PE40 0x02400001 ++#define FLASH_5755VENDOR_ATMEL_FLASH_1 0x03400001 ++#define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002 ++#define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000 ++#define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003 ++#define FLASH_5755VENDOR_ATMEL_FLASH_5 0x02000003 ++#define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003 ++#define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002 ++#define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003 ++#define FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ 0x03000002 ++#define FLASH_5787VENDOR_MICRO_EEPROM_64KHZ 0x03000000 ++#define FLASH_5787VENDOR_MICRO_EEPROM_376KHZ 0x02000000 ++#define FLASH_5761VENDOR_ATMEL_MDB021D 0x00800003 ++#define FLASH_5761VENDOR_ATMEL_MDB041D 0x00800000 ++#define FLASH_5761VENDOR_ATMEL_MDB081D 0x00800002 ++#define FLASH_5761VENDOR_ATMEL_MDB161D 0x00800001 ++#define FLASH_5761VENDOR_ATMEL_ADB021D 0x00000003 ++#define FLASH_5761VENDOR_ATMEL_ADB041D 0x00000000 ++#define FLASH_5761VENDOR_ATMEL_ADB081D 0x00000002 ++#define FLASH_5761VENDOR_ATMEL_ADB161D 0x00000001 ++#define FLASH_5761VENDOR_ST_M_M45PE20 0x02800001 ++#define FLASH_5761VENDOR_ST_M_M45PE40 0x02800000 ++#define FLASH_5761VENDOR_ST_M_M45PE80 0x02800002 ++#define FLASH_5761VENDOR_ST_M_M45PE16 0x02800003 ++#define FLASH_5761VENDOR_ST_A_M45PE20 0x02000001 ++#define FLASH_5761VENDOR_ST_A_M45PE40 0x02000000 ++#define FLASH_5761VENDOR_ST_A_M45PE80 0x02000002 ++#define FLASH_5761VENDOR_ST_A_M45PE16 0x02000003 ++#define FLASH_57780VENDOR_ATMEL_AT45DB011D 0x00400000 ++#define FLASH_57780VENDOR_ATMEL_AT45DB011B 0x03400000 ++#define FLASH_57780VENDOR_ATMEL_AT45DB021D 0x00400002 ++#define FLASH_57780VENDOR_ATMEL_AT45DB021B 0x03400002 ++#define FLASH_57780VENDOR_ATMEL_AT45DB041D 0x00400001 ++#define FLASH_57780VENDOR_ATMEL_AT45DB041B 0x03400001 ++#define FLASH_5717VENDOR_ATMEL_EEPROM 0x02000001 ++#define FLASH_5717VENDOR_MICRO_EEPROM 0x02000003 ++#define FLASH_5717VENDOR_ATMEL_MDB011D 0x01000001 ++#define 
FLASH_5717VENDOR_ATMEL_MDB021D 0x01000003 ++#define FLASH_5717VENDOR_ST_M_M25PE10 0x02000000 ++#define FLASH_5717VENDOR_ST_M_M25PE20 0x02000002 ++#define FLASH_5717VENDOR_ST_M_M45PE10 0x00000001 ++#define FLASH_5717VENDOR_ST_M_M45PE20 0x00000003 ++#define FLASH_5717VENDOR_ATMEL_ADB011B 0x01400000 ++#define FLASH_5717VENDOR_ATMEL_ADB021B 0x01400002 ++#define FLASH_5717VENDOR_ATMEL_ADB011D 0x01400001 ++#define FLASH_5717VENDOR_ATMEL_ADB021D 0x01400003 ++#define FLASH_5717VENDOR_ST_A_M25PE10 0x02400000 ++#define FLASH_5717VENDOR_ST_A_M25PE20 0x02400002 ++#define FLASH_5717VENDOR_ST_A_M45PE10 0x02400001 ++#define FLASH_5717VENDOR_ST_A_M45PE20 0x02400003 ++#define FLASH_5717VENDOR_ATMEL_45USPT 0x03400000 ++#define FLASH_5717VENDOR_ST_25USPT 0x03400002 ++#define FLASH_5717VENDOR_ST_45USPT 0x03400001 ++#define FLASH_5720_EEPROM_HD 0x00000001 ++#define FLASH_5720_EEPROM_LD 0x00000003 ++#define FLASH_5762_EEPROM_HD 0x02000001 ++#define FLASH_5762_EEPROM_LD 0x02000003 ++#define FLASH_5762_MX25L_100 0x00800000 ++#define FLASH_5762_MX25L_200 0x00800002 ++#define FLASH_5762_MX25L_400 0x00800001 ++#define FLASH_5762_MX25L_800 0x00800003 ++#define FLASH_5762_MX25L_160_320 0x03800002 ++#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000 ++#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002 ++#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001 ++#define FLASH_5720VENDOR_M_ATMEL_DB081D 0x01000003 ++#define FLASH_5720VENDOR_M_ST_M25PE10 0x02000000 ++#define FLASH_5720VENDOR_M_ST_M25PE20 0x02000002 ++#define FLASH_5720VENDOR_M_ST_M25PE40 0x02000001 ++#define FLASH_5720VENDOR_M_ST_M25PE80 0x02000003 ++#define FLASH_5720VENDOR_M_ST_M45PE10 0x03000000 ++#define FLASH_5720VENDOR_M_ST_M45PE20 0x03000002 ++#define FLASH_5720VENDOR_M_ST_M45PE40 0x03000001 ++#define FLASH_5720VENDOR_M_ST_M45PE80 0x03000003 ++#define FLASH_5720VENDOR_A_ATMEL_DB011B 0x01800000 ++#define FLASH_5720VENDOR_A_ATMEL_DB021B 0x01800002 ++#define FLASH_5720VENDOR_A_ATMEL_DB041B 0x01800001 ++#define FLASH_5720VENDOR_A_ATMEL_DB011D 0x01c00000 ++#define FLASH_5720VENDOR_A_ATMEL_DB021D 0x01c00002 ++#define FLASH_5720VENDOR_A_ATMEL_DB041D 0x01c00001 ++#define FLASH_5720VENDOR_A_ATMEL_DB081D 0x01c00003 ++#define FLASH_5720VENDOR_A_ST_M25PE10 0x02800000 ++#define FLASH_5720VENDOR_A_ST_M25PE20 0x02800002 ++#define FLASH_5720VENDOR_A_ST_M25PE40 0x02800001 ++#define FLASH_5720VENDOR_A_ST_M25PE80 0x02800003 ++#define FLASH_5720VENDOR_A_ST_M45PE10 0x02c00000 ++#define FLASH_5720VENDOR_A_ST_M45PE20 0x02c00002 ++#define FLASH_5720VENDOR_A_ST_M45PE40 0x02c00001 ++#define FLASH_5720VENDOR_A_ST_M45PE80 0x02c00003 ++#define FLASH_5720VENDOR_ATMEL_45USPT 0x03c00000 ++#define FLASH_5720VENDOR_ST_25USPT 0x03c00002 ++#define FLASH_5720VENDOR_ST_45USPT 0x03c00001 ++#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000 ++#define FLASH_5752PAGE_SIZE_256 0x00000000 ++#define FLASH_5752PAGE_SIZE_512 0x10000000 ++#define FLASH_5752PAGE_SIZE_1K 0x20000000 ++#define FLASH_5752PAGE_SIZE_2K 0x30000000 ++#define FLASH_5752PAGE_SIZE_4K 0x40000000 ++#define FLASH_5752PAGE_SIZE_264 0x50000000 ++#define FLASH_5752PAGE_SIZE_528 0x60000000 ++#define NVRAM_CFG2 0x00007018 ++#define NVRAM_CFG3 0x0000701c ++#define NVRAM_SWARB 0x00007020 ++#define SWARB_REQ_SET0 0x00000001 ++#define SWARB_REQ_SET1 0x00000002 ++#define SWARB_REQ_SET2 0x00000004 ++#define SWARB_REQ_SET3 0x00000008 ++#define SWARB_REQ_CLR0 0x00000010 ++#define SWARB_REQ_CLR1 0x00000020 ++#define SWARB_REQ_CLR2 0x00000040 ++#define SWARB_REQ_CLR3 0x00000080 ++#define SWARB_GNT0 0x00000100 ++#define SWARB_GNT1 0x00000200 ++#define 
SWARB_GNT2 0x00000400 ++#define SWARB_GNT3 0x00000800 ++#define SWARB_REQ0 0x00001000 ++#define SWARB_REQ1 0x00002000 ++#define SWARB_REQ2 0x00004000 ++#define SWARB_REQ3 0x00008000 ++#define NVRAM_ACCESS 0x00007024 ++#define ACCESS_ENABLE 0x00000001 ++#define ACCESS_WR_ENABLE 0x00000002 ++#define NVRAM_WRITE1 0x00007028 ++/* 0x702c unused */ ++ ++#define NVRAM_ADDR_LOCKOUT 0x00007030 ++#define NVRAM_AUTOSENSE_STATUS 0x00007038 ++#define AUTOSENSE_DEVID 0x00000010 ++#define AUTOSENSE_DEVID_MASK 0x00000007 ++#define AUTOSENSE_SIZE_IN_MB 17 ++/* 0x703c --> 0x7500 unused */ ++ ++#define OTP_MODE 0x00007500 ++#define OTP_MODE_OTP_THRU_GRC 0x00000001 ++#define OTP_CTRL 0x00007504 ++#define OTP_CTRL_OTP_PROG_ENABLE 0x00200000 ++#define OTP_CTRL_OTP_CMD_READ 0x00000000 ++#define OTP_CTRL_OTP_CMD_INIT 0x00000008 ++#define OTP_CTRL_OTP_CMD_START 0x00000001 ++#define OTP_STATUS 0x00007508 ++#define OTP_STATUS_CMD_DONE 0x00000001 ++#define OTP_ADDRESS 0x0000750c ++#define OTP_ADDRESS_MAGIC1 0x000000a0 ++#define OTP_ADDRESS_MAGIC2 0x00000080 ++/* 0x7510 unused */ ++ ++#define OTP_READ_DATA 0x00007514 ++/* 0x7518 --> 0x7c04 unused */ ++ ++#define PCIE_TRANSACTION_CFG 0x00007c04 ++#define PCIE_TRANS_CFG_1SHOT_MSI 0x20000000 ++#define PCIE_TRANS_CFG_LOM 0x00000020 ++/* 0x7c08 --> 0x7d28 unused */ ++ ++#define PCIE_PWR_MGMT_THRESH 0x00007d28 ++#define PCIE_PWR_MGMT_L1_THRESH_MSK 0x0000ff00 ++#define PCIE_PWR_MGMT_L1_THRESH_4MS 0x0000ff00 ++#define PCIE_PWR_MGMT_EXT_ASPM_TMR_EN 0x01000000 ++/* 0x7d2c --> 0x7d54 unused */ ++ ++#define TG3_PCIE_LNKCTL 0x00007d54 ++#define TG3_PCIE_LNKCTL_L1_PLL_PD_EN 0x00000008 ++#define TG3_PCIE_LNKCTL_L1_PLL_PD_DIS 0x00000080 ++/* 0x7d58 --> 0x7e70 unused */ ++ ++#define TG3_PCIE_PHY_TSTCTL 0x00007e2c ++#define TG3_PCIE_PHY_TSTCTL_PCIE10 0x00000040 ++#define TG3_PCIE_PHY_TSTCTL_PSCRAM 0x00000020 ++ ++#define TG3_PCIE_EIDLE_DELAY 0x00007e70 ++#define TG3_PCIE_EIDLE_DELAY_MASK 0x0000001f ++#define TG3_PCIE_EIDLE_DELAY_13_CLKS 0x0000000c ++/* 0x7e74 --> 0x8000 unused */ ++ ++/* Alternate PCIE definitions */ ++#define TG3_PCIE_TLDLPL_PORT 0x00007c00 ++#define TG3_PCIE_DL_LO_FTSMAX 0x0000000c ++#define TG3_PCIE_DL_LO_FTSMAX_MSK 0x000000ff ++#define TG3_PCIE_DL_LO_FTSMAX_VAL 0x0000002c ++#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004 ++#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000 ++#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014 ++#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000 ++ ++#define TG3_REG_BLK_SIZE 0x00008000 ++ ++/* OTP bit definitions */ ++#define TG3_OTP_AGCTGT_MASK 0x000000e0 ++#define TG3_OTP_AGCTGT_SHIFT 1 ++#define TG3_OTP_HPFFLTR_MASK 0x00000300 ++#define TG3_OTP_HPFFLTR_SHIFT 1 ++#define TG3_OTP_HPFOVER_MASK 0x00000400 ++#define TG3_OTP_HPFOVER_SHIFT 1 ++#define TG3_OTP_LPFDIS_MASK 0x00000800 ++#define TG3_OTP_LPFDIS_SHIFT 11 ++#define TG3_OTP_VDAC_MASK 0xff000000 ++#define TG3_OTP_VDAC_SHIFT 24 ++#define TG3_OTP_10BTAMP_MASK 0x0000f000 ++#define TG3_OTP_10BTAMP_SHIFT 8 ++#define TG3_OTP_ROFF_MASK 0x00e00000 ++#define TG3_OTP_ROFF_SHIFT 11 ++#define TG3_OTP_RCOFF_MASK 0x001c0000 ++#define TG3_OTP_RCOFF_SHIFT 16 ++ ++#define TG3_OTP_DEFAULT 0x286c1640 ++ ++ ++/* Hardware Legacy NVRAM layout */ ++#define TG3_NVM_VPD_OFF 0x100 ++#define TG3_NVM_VPD_LEN 256 ++ ++/* Hardware Selfboot NVRAM layout */ ++#define TG3_NVM_HWSB_CFG1 0x00000004 ++#define TG3_NVM_HWSB_CFG1_MAJMSK 0xf8000000 ++#define TG3_NVM_HWSB_CFG1_MAJSFT 27 ++#define TG3_NVM_HWSB_CFG1_MINMSK 0x07c00000 ++#define TG3_NVM_HWSB_CFG1_MINSFT 22 ++ ++#define TG3_EEPROM_MAGIC 0x669955aa ++#define 
TG3_EEPROM_MAGIC_FW 0xa5000000 ++#define TG3_EEPROM_MAGIC_FW_MSK 0xff000000 ++#define TG3_EEPROM_SB_FORMAT_MASK 0x00e00000 ++#define TG3_EEPROM_SB_FORMAT_1 0x00200000 ++#define TG3_EEPROM_SB_REVISION_MASK 0x001f0000 ++#define TG3_EEPROM_SB_REVISION_0 0x00000000 ++#define TG3_EEPROM_SB_REVISION_2 0x00020000 ++#define TG3_EEPROM_SB_REVISION_3 0x00030000 ++#define TG3_EEPROM_SB_REVISION_4 0x00040000 ++#define TG3_EEPROM_SB_REVISION_5 0x00050000 ++#define TG3_EEPROM_SB_REVISION_6 0x00060000 ++#define TG3_EEPROM_MAGIC_HW 0xabcd ++#define TG3_EEPROM_MAGIC_HW_MSK 0xffff ++ ++#define TG3_NVM_DIR_START 0x18 ++#define TG3_NVM_DIR_END 0x78 ++#define TG3_NVM_DIRENT_SIZE 0xc ++#define TG3_NVM_DIRTYPE_SHIFT 24 ++#define TG3_NVM_DIRTYPE_LENMSK 0x003fffff ++#define TG3_NVM_DIRTYPE_ASFINI 1 ++#define TG3_NVM_DIRTYPE_EXTVPD 20 ++#define TG3_NVM_PTREV_BCVER 0x94 ++#define TG3_NVM_BCVER_MAJMSK 0x0000ff00 ++#define TG3_NVM_BCVER_MAJSFT 8 ++#define TG3_NVM_BCVER_MINMSK 0x000000ff ++ ++#define TG3_EEPROM_SB_F1R0_EDH_OFF 0x10 ++#define TG3_EEPROM_SB_F1R2_EDH_OFF 0x14 ++#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 ++#define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18 ++#define TG3_EEPROM_SB_F1R4_EDH_OFF 0x1c ++#define TG3_EEPROM_SB_F1R5_EDH_OFF 0x20 ++#define TG3_EEPROM_SB_F1R6_EDH_OFF 0x4c ++#define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700 ++#define TG3_EEPROM_SB_EDH_MAJ_SHFT 8 ++#define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff ++#define TG3_EEPROM_SB_EDH_BLD_MASK 0x0000f800 ++#define TG3_EEPROM_SB_EDH_BLD_SHFT 11 ++ ++ ++/* 32K Window into NIC internal memory */ ++#define NIC_SRAM_WIN_BASE 0x00008000 ++ ++/* Offsets into first 32k of NIC internal memory. */ ++#define NIC_SRAM_PAGE_ZERO 0x00000000 ++#define NIC_SRAM_SEND_RCB 0x00000100 /* 16 * TG3_BDINFO_... */ ++#define NIC_SRAM_RCV_RET_RCB 0x00000200 /* 16 * TG3_BDINFO_... 
*/ ++#define NIC_SRAM_STATS_BLK 0x00000300 ++#define NIC_SRAM_STATUS_BLK 0x00000b00 ++ ++#define NIC_SRAM_FIRMWARE_MBOX 0x00000b50 ++#define NIC_SRAM_FIRMWARE_MBOX_MAGIC1 0x4B657654 ++#define NIC_SRAM_FIRMWARE_MBOX_MAGIC2 0x4861764b /* !dma on linkchg */ ++ ++#define NIC_SRAM_DATA_SIG 0x00000b54 ++#define NIC_SRAM_DATA_SIG_MAGIC 0x4b657654 /* ascii for 'KevT' */ ++ ++#define NIC_SRAM_DATA_CFG 0x00000b58 ++#define NIC_SRAM_DATA_CFG_LED_MODE_MASK 0x0000000c ++#define NIC_SRAM_DATA_CFG_LED_MODE_MAC 0x00000000 ++#define NIC_SRAM_DATA_CFG_LED_MODE_PHY_1 0x00000004 ++#define NIC_SRAM_DATA_CFG_LED_MODE_PHY_2 0x00000008 ++#define NIC_SRAM_DATA_CFG_PHY_TYPE_MASK 0x00000030 ++#define NIC_SRAM_DATA_CFG_PHY_TYPE_UNKNOWN 0x00000000 ++#define NIC_SRAM_DATA_CFG_PHY_TYPE_COPPER 0x00000010 ++#define NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER 0x00000020 ++#define NIC_SRAM_DATA_CFG_WOL_ENABLE 0x00000040 ++#define NIC_SRAM_DATA_CFG_ASF_ENABLE 0x00000080 ++#define NIC_SRAM_DATA_CFG_EEPROM_WP 0x00000100 ++#define NIC_SRAM_DATA_CFG_MINI_PCI 0x00001000 ++#define NIC_SRAM_DATA_CFG_FIBER_WOL 0x00004000 ++#define NIC_SRAM_DATA_CFG_NO_GPIO2 0x00100000 ++#define NIC_SRAM_DATA_CFG_APE_ENABLE 0x00200000 ++ ++#define NIC_SRAM_DATA_VER 0x00000b5c ++#define NIC_SRAM_DATA_VER_SHIFT 16 ++ ++#define NIC_SRAM_DATA_PHY_ID 0x00000b74 ++#define NIC_SRAM_DATA_PHY_ID1_MASK 0xffff0000 ++#define NIC_SRAM_DATA_PHY_ID2_MASK 0x0000ffff ++ ++#define NIC_SRAM_FW_CMD_MBOX 0x00000b78 ++#define FWCMD_NICDRV_ALIVE 0x00000001 ++#define FWCMD_NICDRV_PAUSE_FW 0x00000002 ++#define FWCMD_NICDRV_IPV4ADDR_CHG 0x00000003 ++#define FWCMD_NICDRV_IPV6ADDR_CHG 0x00000004 ++#define FWCMD_NICDRV_FIX_DMAR 0x00000005 ++#define FWCMD_NICDRV_FIX_DMAW 0x00000006 ++#define FWCMD_NICDRV_LINK_UPDATE 0x0000000c ++#define FWCMD_NICDRV_ALIVE2 0x0000000d ++#define FWCMD_NICDRV_ALIVE3 0x0000000e ++#define NIC_SRAM_FW_CMD_LEN_MBOX 0x00000b7c ++#define NIC_SRAM_FW_CMD_DATA_MBOX 0x00000b80 ++#define NIC_SRAM_FW_ASF_STATUS_MBOX 0x00000c00 ++#define NIC_SRAM_FW_DRV_STATE_MBOX 0x00000c04 ++#define DRV_STATE_START 0x00000001 ++#define DRV_STATE_START_DONE 0x80000001 ++#define DRV_STATE_UNLOAD 0x00000002 ++#define DRV_STATE_UNLOAD_DONE 0x80000002 ++#define DRV_STATE_WOL 0x00000003 ++#define DRV_STATE_SUSPEND 0x00000004 ++ ++#define NIC_SRAM_FW_RESET_TYPE_MBOX 0x00000c08 ++ ++#define NIC_SRAM_MAC_ADDR_HIGH_MBOX 0x00000c14 ++#define NIC_SRAM_MAC_ADDR_LOW_MBOX 0x00000c18 ++ ++#define NIC_SRAM_WOL_MBOX 0x00000d30 ++#define WOL_SIGNATURE 0x474c0000 ++#define WOL_DRV_STATE_SHUTDOWN 0x00000001 ++#define WOL_DRV_WOL 0x00000002 ++#define WOL_SET_MAGIC_PKT 0x00000004 ++ ++#define NIC_SRAM_DATA_CFG_2 0x00000d38 ++ ++#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00004000 ++#define SHASTA_EXT_LED_MODE_MASK 0x00018000 ++#define SHASTA_EXT_LED_LEGACY 0x00000000 ++#define SHASTA_EXT_LED_SHARED 0x00008000 ++#define SHASTA_EXT_LED_MAC 0x00010000 ++#define SHASTA_EXT_LED_COMBO 0x00018000 ++ ++#define NIC_SRAM_DATA_CFG_3 0x00000d3c ++#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002 ++#define NIC_SRAM_LNK_FLAP_AVOID 0x00400000 ++#define NIC_SRAM_1G_ON_VAUX_OK 0x00800000 ++ ++#define NIC_SRAM_DATA_CFG_4 0x00000d60 ++#define NIC_SRAM_GMII_MODE 0x00000002 ++#define NIC_SRAM_RGMII_INBAND_DISABLE 0x00000004 ++#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008 ++#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010 ++ ++#define NIC_SRAM_CPMU_STATUS 0x00000e00 ++#define NIC_SRAM_CPMUSTAT_SIG 0x0000362c ++#define NIC_SRAM_CPMUSTAT_SIG_MSK 0x0000ffff ++ ++#define NIC_SRAM_DATA_CFG_5 0x00000e0c ++#define NIC_SRAM_DISABLE_1G_HALF_ADV 
0x00000002
++
++#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000
++
++#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000
++#define NIC_SRAM_DMA_DESC_POOL_SIZE 0x00002000
++#define NIC_SRAM_TX_BUFFER_DESC 0x00004000 /* 512 entries */
++#define NIC_SRAM_RX_BUFFER_DESC 0x00006000 /* 256 entries */
++#define NIC_SRAM_RX_JUMBO_BUFFER_DESC 0x00007000 /* 256 entries */
++#define NIC_SRAM_MBUF_POOL_BASE 0x00008000
++#define NIC_SRAM_MBUF_POOL_SIZE96 0x00018000
++#define NIC_SRAM_MBUF_POOL_SIZE64 0x00010000
++#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
++#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
++
++#define TG3_SRAM_RXCPU_SCRATCH_BASE_57766 0x00030000
++#define TG3_SRAM_RXCPU_SCRATCH_SIZE_57766 0x00010000
++#define TG3_SBROM_IN_SERVICE_LOOP 0x51
++
++#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128
++#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64
++#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32
++
++#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700 64
++#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717 16
++
++
++/* Currently this is fixed. */
++#define TG3_PHY_PCIE_ADDR 0x00
++#define TG3_PHY_MII_ADDR 0x01
++
++
++/*** Tigon3 specific PHY MII registers. ***/
++#define MII_TG3_MMD_CTRL 0x0d /* MMD Access Control register */
++#define MII_TG3_MMD_CTRL_DATA_NOINC 0x4000
++#define MII_TG3_MMD_ADDRESS 0x0e /* MMD Address Data register */
++
++#define MII_TG3_EXT_CTRL 0x10 /* Extended control register */
++#define MII_TG3_EXT_CTRL_FIFO_ELASTIC 0x0001
++#define MII_TG3_EXT_CTRL_LNK3_LED_MODE 0x0002
++#define MII_TG3_EXT_CTRL_FORCE_LED_OFF 0x0008
++#define MII_TG3_EXT_CTRL_TBI 0x8000
++
++#define MII_TG3_EXT_STAT 0x11 /* Extended status register */
++#define MII_TG3_EXT_STAT_MDIX 0x2000
++#define MII_TG3_EXT_STAT_LPASS 0x0100
++
++#define MII_TG3_RXR_COUNTERS 0x14 /* Local/Remote Receiver Counts */
++#define MII_TG3_DSP_RW_PORT 0x15 /* DSP coefficient read/write port */
++#define MII_TG3_DSP_CONTROL 0x16 /* DSP control register */
++#define MII_TG3_DSP_ADDRESS 0x17 /* DSP address register */
++
++#define MII_TG3_DSP_TAP1 0x0001
++#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007
++#define MII_TG3_DSP_TAP26 0x001a
++#define MII_TG3_DSP_TAP26_ALNOKO 0x0001
++#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002
++#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004
++#define MII_TG3_DSP_AADJ1CH0 0x001f
++#define MII_TG3_DSP_CH34TP2 0x4022
++#define MII_TG3_DSP_CH34TP2_HIBW01 0x01ff
++#define MII_TG3_DSP_AADJ1CH3 0x601f
++#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
++#define MII_TG3_DSP_TLER 0x0d40 /* Top Level Expansion reg */
++#define MII_TG3_DSP_TLER_AUTOGREEEN_EN 0x0001
++#define MII_TG3_DSP_EXP1_INT_STAT 0x0f01
++#define MII_TG3_DSP_EXP8 0x0f08
++#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001
++#define MII_TG3_DSP_EXP8_AEDW 0x0200
++#define MII_TG3_DSP_EXP75 0x0f75
++#define MII_TG3_DSP_EXP75_SUP_CM_OSC 0x0001
++#define MII_TG3_DSP_EXP96 0x0f96
++#define MII_TG3_DSP_EXP97 0x0f97
++
++#define MII_TG3_AUX_CTRL 0x18 /* auxiliary control register */
++
++#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL 0x0000
++#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400
++#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800
++#define MII_TG3_AUXCTL_ACTL_EXTPKTLEN 0x4000
++#define MII_TG3_AUXCTL_ACTL_EXTLOOPBK 0x8000
++
++#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002
++#define MII_TG3_AUXCTL_PCTL_WOL_EN 0x0008
++#define MII_TG3_AUXCTL_PCTL_100TX_LPWR 0x0010
++#define MII_TG3_AUXCTL_PCTL_SPR_ISOLATE 0x0020
++#define MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC 0x0040
++#define MII_TG3_AUXCTL_PCTL_VREG_11V 0x0180
++
++#define MII_TG3_AUXCTL_SHDWSEL_MISCTEST 0x0004
++
++#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007
++#define MII_TG3_AUXCTL_MISC_WIRESPD_EN 0x0010
++#define MII_TG3_AUXCTL_MISC_RGMII_OOBSC 0x0020
++#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200
++#define MII_TG3_AUXCTL_MISC_RDSEL_SHIFT 12
++#define MII_TG3_AUXCTL_MISC_WREN 0x8000
++
++#define MII_TG3_AUX_STAT 0x19 /* auxiliary status register */
++#define MII_TG3_AUX_STAT_LPASS 0x0004
++#define MII_TG3_AUX_STAT_SPDMASK 0x0700
++#define MII_TG3_AUX_STAT_10HALF 0x0100
++#define MII_TG3_AUX_STAT_10FULL 0x0200
++#define MII_TG3_AUX_STAT_100HALF 0x0300
++#define MII_TG3_AUX_STAT_100_4 0x0400
++#define MII_TG3_AUX_STAT_100FULL 0x0500
++#define MII_TG3_AUX_STAT_1000HALF 0x0600
++#define MII_TG3_AUX_STAT_1000FULL 0x0700
++#define MII_TG3_AUX_STAT_100 0x0008
++#define MII_TG3_AUX_STAT_FULL 0x0001
++
++#define MII_TG3_ISTAT 0x1a /* IRQ status register */
++#define MII_TG3_IMASK 0x1b /* IRQ mask register */
++
++/* ISTAT/IMASK event bits */
++#define MII_TG3_INT_LINKCHG 0x0002
++#define MII_TG3_INT_SPEEDCHG 0x0004
++#define MII_TG3_INT_DUPLEXCHG 0x0008
++#define MII_TG3_INT_ANEG_PAGE_RX 0x0400
++
++#define MII_TG3_MISC_SHDW 0x1c /* Misc shadow register */
++#define MII_TG3_MISC_SHDW_WREN 0x8000
++
++#define MII_TG3_MISC_SHDW_SCR5_C125OE 0x0001
++#define MII_TG3_MISC_SHDW_SCR5_DLLAPD 0x0002
++#define MII_TG3_MISC_SHDW_SCR5_SDTL 0x0004
++#define MII_TG3_MISC_SHDW_SCR5_DLPTLM 0x0008
++#define MII_TG3_MISC_SHDW_SCR5_LPED 0x0010
++#define MII_TG3_MISC_SHDW_SCR5_TRDDAPD 0x0100
++#define MII_TG3_MISC_SHDW_SCR5_SEL 0x1400
++
++#define MII_TG3_MISC_SHDW_APD_WKTM_84MS 0x0001
++#define MII_TG3_MISC_SHDW_APD_ENABLE 0x0020
++#define MII_TG3_MISC_SHDW_APD_SEL 0x2800
++
++#define MII_TG3_MISC_SHDW_RGMII_MODESEL0 0x0008
++#define MII_TG3_MISC_SHDW_RGMII_MODESEL1 0x0010
++#define MII_TG3_MISC_SHDW_RGMII_SEL 0x2c00
++
++#define MII_TG3_TEST1 0x1e
++#define MII_TG3_TEST1_TRIM_EN 0x0010
++#define MII_TG3_TEST1_CRC_EN 0x8000
++
++/* Clause 45 expansion registers */
++#define TG3_CL45_D7_EEEADV_CAP 0x003c
++#define TG3_CL45_D7_EEEADV_CAP_100TX 0x0002
++#define TG3_CL45_D7_EEEADV_CAP_1000T 0x0004
++#define TG3_CL45_D7_EEERES_STAT 0x803e
++#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002
++#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004
++
++
++/* Fast Ethernet Transceiver definitions */
++#define MII_TG3_FET_PTEST 0x17
++#define MII_TG3_FET_PTEST_TRIM_SEL 0x0010
++#define MII_TG3_FET_PTEST_TRIM_2 0x0002
++#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
++#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
++
++#define MII_TG3_FET_GEN_STAT 0x1c
++#define MII_TG3_FET_GEN_STAT_MDIXSTAT 0x2000
++
++#define MII_TG3_FET_TEST 0x1f
++#define MII_TG3_FET_SHADOW_EN 0x0080
++
++#define MII_TG3_FET_SHDW_MISCCTRL 0x10
++#define MII_TG3_FET_SHDW_MISCCTRL_ELBK 0x1000
++#define MII_TG3_FET_SHDW_MISCCTRL_MDIX 0x4000
++
++#define MII_TG3_FET_SHDW_AUXMODE4 0x1a
++#define MII_TG3_FET_SHDW_AM4_LED_MODE1 0x0001
++#define MII_TG3_FET_SHDW_AM4_LED_MASK 0x0003
++#define MII_TG3_FET_SHDW_AUXMODE4_SBPD 0x0008
++
++#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b
++#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020
++
++/* Serdes PHY Register Definitions */
++#define SERDES_TG3_1000X_STATUS 0x14
++#define SERDES_TG3_SGMII_MODE 0x0001
++#define SERDES_TG3_LINK_UP 0x0002
++#define SERDES_TG3_FULL_DUPLEX 0x0004
++#define SERDES_TG3_SPEED_100 0x0008
++#define SERDES_TG3_SPEED_1000 0x0010
++
++/* APE registers.  Accessible through BAR1 */
Accessible through BAR1 */ ++#define TG3_APE_GPIO_MSG 0x0008 ++#define TG3_APE_GPIO_MSG_SHIFT 4 ++#define TG3_APE_EVENT 0x000c ++#define APE_EVENT_1 0x00000001 ++#define TG3_APE_LOCK_REQ 0x002c ++#define APE_LOCK_REQ_DRIVER 0x00001000 ++#define TG3_APE_LOCK_GRANT 0x004c ++#define APE_LOCK_GRANT_DRIVER 0x00001000 ++#define TG3_APE_STICKY_TMR 0x00b0 ++#define TG3_APE_OTP_CTRL 0x00e8 ++#define APE_OTP_CTRL_PROG_EN 0x200000 ++#define APE_OTP_CTRL_CMD_RD 0x000000 ++#define APE_OTP_CTRL_START 0x000001 ++#define TG3_APE_OTP_STATUS 0x00ec ++#define APE_OTP_STATUS_CMD_DONE 0x000001 ++#define TG3_APE_OTP_ADDR 0x00f0 ++#define APE_OTP_ADDR_CPU_ENABLE 0x80000000 ++#define TG3_APE_OTP_RD_DATA 0x00f8 ++ ++#define OTP_ADDRESS_MAGIC0 0x00000050 ++#define TG3_OTP_MAGIC0_VALID(val) \ ++ ((((val) & 0xf0000000) == 0xa0000000) ||\ ++ (((val) & 0x0f000000) == 0x0a000000)) ++ ++/* APE shared memory. Accessible through BAR1 */ ++#define TG3_APE_SHMEM_BASE 0x4000 ++#define TG3_APE_SEG_SIG 0x4000 ++#define APE_SEG_SIG_MAGIC 0x41504521 ++#define TG3_APE_FW_STATUS 0x400c ++#define APE_FW_STATUS_READY 0x00000100 ++#define TG3_APE_FW_FEATURES 0x4010 ++#define TG3_APE_FW_FEATURE_NCSI 0x00000002 ++#define TG3_APE_FW_VERSION 0x4018 ++#define APE_FW_VERSION_MAJMSK 0xff000000 ++#define APE_FW_VERSION_MAJSFT 24 ++#define APE_FW_VERSION_MINMSK 0x00ff0000 ++#define APE_FW_VERSION_MINSFT 16 ++#define APE_FW_VERSION_REVMSK 0x0000ff00 ++#define APE_FW_VERSION_REVSFT 8 ++#define APE_FW_VERSION_BLDMSK 0x000000ff ++#define TG3_APE_SEG_MSG_BUF_OFF 0x401c ++#define TG3_APE_SEG_MSG_BUF_LEN 0x4020 ++#define TG3_APE_HOST_SEG_SIG 0x4200 ++#define APE_HOST_SEG_SIG_MAGIC 0x484f5354 ++#define TG3_APE_HOST_SEG_LEN 0x4204 ++#define APE_HOST_SEG_LEN_MAGIC 0x00000020 ++#define TG3_APE_HOST_INIT_COUNT 0x4208 ++#define TG3_APE_HOST_DRIVER_ID 0x420c ++#define APE_HOST_DRIVER_ID_LINUX 0xf0000000 ++#define APE_HOST_DRIVER_ID_ESX 0xfa000000 ++#if !defined(__VMKLNX__) ++#define APE_HOST_DRIVER_ID_MAGIC(maj, min, rev) \ ++ (APE_HOST_DRIVER_ID_LINUX | (maj & 0xff) << 16 | (min & 0xff) << 8 |\ ++ (rev & 0xff)) ++#else ++#define APE_HOST_DRIVER_ID_MAGIC(maj, min, rev) \ ++ (APE_HOST_DRIVER_ID_ESX | (maj & 0xff) << 16 | (min & 0xff) << 8 |\ ++ (rev & 0xff)) ++#endif ++#define TG3_APE_HOST_BEHAVIOR 0x4210 ++#define APE_HOST_BEHAV_NO_PHYLOCK 0x00000001 ++#define TG3_APE_HOST_HEARTBEAT_INT_MS 0x4214 ++#define APE_HOST_HEARTBEAT_INT_DISABLE 0 ++#define APE_HOST_HEARTBEAT_INT_5SEC 5000 ++#define TG3_APE_HOST_HEARTBEAT_COUNT 0x4218 ++#define TG3_APE_HOST_DRVR_STATE 0x421c ++#define TG3_APE_HOST_DRVR_STATE_START 0x00000001 ++#define TG3_APE_HOST_DRVR_STATE_UNLOAD 0x00000002 ++#define TG3_APE_HOST_DRVR_STATE_WOL 0x00000003 ++#define TG3_APE_HOST_WOL_SPEED 0x4224 ++#define TG3_APE_HOST_WOL_SPEED_AUTO 0x00008000 ++ ++#define TG3_APE_EVENT_STATUS 0x4300 ++ ++#define APE_EVENT_STATUS_DRIVER_EVNT 0x00000010 ++#define APE_EVENT_STATUS_STATE_CHNGE 0x00000500 ++#define APE_EVENT_STATUS_SCRTCHPD_READ 0x00001600 ++#define APE_EVENT_STATUS_SCRTCHPD_WRITE 0x00001700 ++#define APE_EVENT_STATUS_STATE_START 0x00010000 ++#define APE_EVENT_STATUS_STATE_UNLOAD 0x00020000 ++#define APE_EVENT_STATUS_STATE_WOL 0x00030000 ++#define APE_EVENT_STATUS_STATE_SUSPEND 0x00040000 ++#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000 ++ ++#define TG3_APE_PER_LOCK_REQ 0x8400 ++#define APE_LOCK_PER_REQ_DRIVER 0x00001000 ++#define TG3_APE_PER_LOCK_GRANT 0x8420 ++#define APE_PER_LOCK_GRANT_DRIVER 0x00001000 ++ ++/* APE convenience enumerations. 
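++ *
++ * A minimal sketch of how the lock numbers defined below combine with
++ * the TG3_APE_LOCK_REQ/TG3_APE_LOCK_GRANT registers above (assuming
++ * the tg3_ape_write32() and tg3_ape_read32() accessors from tg3.c in
++ * this patch):
++ *
++ *	off = 4 * locknum;
++ *	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
++ *	for (i = 0; i < 100; i++) {
++ *		if (tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off) &
++ *		    APE_LOCK_GRANT_DRIVER)
++ *			break;
++ *		udelay(10);
++ *	}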
*/
++#define TG3_APE_LOCK_PHY0 0
++#define TG3_APE_LOCK_GRC 1
++#define TG3_APE_LOCK_PHY1 2
++#define TG3_APE_LOCK_PHY2 3
++#define TG3_APE_LOCK_MEM 4
++#define TG3_APE_LOCK_PHY3 5
++#define TG3_APE_LOCK_GPIO 7
++#define TG3_APE_HB_INTERVAL (tp->ape_hb_interval)
++
++/* There are two ways to manage the TX descriptors on the tigon3.
++ * Either the descriptors are in host DMA'able memory, or they
++ * exist only in the card's on-chip SRAM. All 16 send BDs use
++ * the same mode; they may not be configured individually.
++ *
++ * This driver always uses host memory TX descriptors.
++ *
++ * To use host memory TX descriptors:
++ * 1) Set GRC_MODE_HOST_SENDBDS in GRC_MODE register.
++ * Make sure GRC_MODE_4X_NIC_SEND_RINGS is clear.
++ * 2) Allocate DMA'able memory.
++ * 3) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM:
++ * a) Set TG3_BDINFO_HOST_ADDR to DMA address of memory
++ * obtained in step 2
++ * b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC.
++ * c) Set len field of TG3_BDINFO_MAXLEN_FLAGS to number
++ * of TX descriptors. Leave flags field clear.
++ * 4) Access TX descriptors via host memory. The chip
++ * will refetch into local SRAM as needed when producer
++ * index mailboxes are updated.
++ *
++ * To use on-chip TX descriptors:
++ * 1) Set GRC_MODE_4X_NIC_SEND_RINGS in GRC_MODE register.
++ * Make sure GRC_MODE_HOST_SENDBDS is clear.
++ * 2) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM:
++ * a) Set TG3_BDINFO_HOST_ADDR to zero.
++ * b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC
++ * c) TG3_BDINFO_MAXLEN_FLAGS is don't care.
++ * 3) Access TX descriptors directly in on-chip SRAM
++ * using normal {read,write}l(). (and not using
++ * pointer dereferencing of ioremap()'d memory like
++ * the broken Broadcom driver does)
++ *
++ * Note that BDINFO_FLAGS_DISABLED should be set in the flags field of
++ * TG3_BDINFO_MAXLEN_FLAGS of all unused SEND_RCB indices.
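++ *
++ * A minimal sketch of steps 3a-3c for the host memory case (assuming
++ * the tg3_write_mem() helper plus the TG3_BDINFO_*, TG3_64BIT_REG_* and
++ * BDINFO_FLAGS_MAXLEN_SHIFT definitions found elsewhere in this patch;
++ * 'rcb' is the chosen NIC_SRAM_SEND_RCB address and 'mapping' the DMA
++ * address obtained in step 2):
++ *
++ *	tg3_write_mem(tp, rcb + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
++ *		      ((u64) mapping >> 32));
++ *	tg3_write_mem(tp, rcb + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
++ *		      ((u64) mapping & 0xffffffff));
++ *	tg3_write_mem(tp, rcb + TG3_BDINFO_MAXLEN_FLAGS,
++ *		      num_tx_descs << BDINFO_FLAGS_MAXLEN_SHIFT);
++ *	tg3_write_mem(tp, rcb + TG3_BDINFO_NIC_ADDR, NIC_SRAM_TX_BUFFER_DESC);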
++ */ ++struct tg3_tx_buffer_desc { ++ u32 addr_hi; ++ u32 addr_lo; ++ ++ u32 len_flags; ++#define TXD_FLAG_TCPUDP_CSUM 0x0001 ++#define TXD_FLAG_IP_CSUM 0x0002 ++#define TXD_FLAG_END 0x0004 ++#define TXD_FLAG_IP_FRAG 0x0008 ++#define TXD_FLAG_JMB_PKT 0x0008 ++#define TXD_FLAG_IP_FRAG_END 0x0010 ++#define TXD_FLAG_HWTSTAMP 0x0020 ++#define TXD_FLAG_VLAN 0x0040 ++#define TXD_FLAG_COAL_NOW 0x0080 ++#define TXD_FLAG_CPU_PRE_DMA 0x0100 ++#define TXD_FLAG_CPU_POST_DMA 0x0200 ++#define TXD_FLAG_ADD_SRC_ADDR 0x1000 ++#define TXD_FLAG_CHOOSE_SRC_ADDR 0x6000 ++#define TXD_FLAG_NO_CRC 0x8000 ++#define TXD_LEN_SHIFT 16 ++ ++ u32 vlan_tag; ++#define TXD_VLAN_TAG_SHIFT 0 ++#define TXD_MSS_SHIFT 16 ++}; ++ ++#define TXD_ADDR 0x00UL /* 64-bit */ ++#define TXD_LEN_FLAGS 0x08UL /* 32-bit (upper 16-bits are len) */ ++#define TXD_VLAN_TAG 0x0cUL /* 32-bit (upper 16-bits are tag) */ ++#define TXD_SIZE 0x10UL ++ ++struct tg3_rx_buffer_desc { ++ u32 addr_hi; ++ u32 addr_lo; ++ ++ u32 idx_len; ++#define RXD_IDX_MASK 0xffff0000 ++#define RXD_IDX_SHIFT 16 ++#define RXD_LEN_MASK 0x0000ffff ++#define RXD_LEN_SHIFT 0 ++ ++ u32 type_flags; ++#define RXD_TYPE_SHIFT 16 ++#define RXD_FLAGS_SHIFT 0 ++ ++#define RXD_FLAG_END 0x0004 ++#define RXD_FLAG_MINI 0x0800 ++#define RXD_FLAG_JUMBO 0x0020 ++#define RXD_FLAG_VLAN 0x0040 ++#define RXD_FLAG_ERROR 0x0400 ++#define RXD_FLAG_IP_CSUM 0x1000 ++#define RXD_FLAG_TCPUDP_CSUM 0x2000 ++#define RXD_FLAG_IS_TCP 0x4000 ++#define RXD_FLAG_PTPSTAT_MASK 0x0210 ++#define RXD_FLAG_PTPSTAT_PTPV1 0x0010 ++#define RXD_FLAG_PTPSTAT_PTPV2 0x0200 ++ ++ u32 ip_tcp_csum; ++#define RXD_IPCSUM_MASK 0xffff0000 ++#define RXD_IPCSUM_SHIFT 16 ++#define RXD_TCPCSUM_MASK 0x0000ffff ++#define RXD_TCPCSUM_SHIFT 0 ++ ++ u32 err_vlan; ++ ++#define RXD_VLAN_MASK 0x0000ffff ++ ++#define RXD_ERR_BAD_CRC 0x00010000 ++#define RXD_ERR_COLLISION 0x00020000 ++#define RXD_ERR_LINK_LOST 0x00040000 ++#define RXD_ERR_PHY_DECODE 0x00080000 ++#define RXD_ERR_ODD_NIBBLE_RCVD_MII 0x00100000 ++#define RXD_ERR_MAC_ABRT 0x00200000 ++#define RXD_ERR_TOO_SMALL 0x00400000 ++#define RXD_ERR_NO_RESOURCES 0x00800000 ++#define RXD_ERR_HUGE_FRAME 0x01000000 ++ ++#define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \ ++ RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \ ++ RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \ ++ RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME) ++ ++ u32 reserved; ++ u32 opaque; ++#define RXD_OPAQUE_INDEX_MASK 0x0000ffff ++#define RXD_OPAQUE_INDEX_SHIFT 0 ++#define RXD_OPAQUE_RING_STD 0x00010000 ++#define RXD_OPAQUE_RING_JUMBO 0x00020000 ++#define RXD_OPAQUE_RING_MINI 0x00040000 ++#define RXD_OPAQUE_RING_MASK 0x00070000 ++}; ++ ++struct tg3_ext_rx_buffer_desc { ++ struct { ++ u32 addr_hi; ++ u32 addr_lo; ++ } addrlist[3]; ++ u32 len2_len1; ++ u32 resv_len3; ++ struct tg3_rx_buffer_desc std; ++}; ++ ++/* We only use this when testing out the DMA engine ++ * at probe time. This is the internal format of buffer ++ * descriptors used by the chip at NIC_SRAM_DMA_DESCS. 
++ */ ++struct tg3_internal_buffer_desc { ++ u32 addr_hi; ++ u32 addr_lo; ++ u32 nic_mbuf; ++ /* XXX FIX THIS */ ++#ifdef __BIG_ENDIAN ++ u16 cqid_sqid; ++ u16 len; ++#else ++ u16 len; ++ u16 cqid_sqid; ++#endif ++ u32 flags; ++ u32 __cookie1; ++ u32 __cookie2; ++ u32 __cookie3; ++}; ++ ++#define TG3_HW_STATUS_SIZE 0x50 ++struct tg3_hw_status { ++ volatile u32 status; ++#define SD_STATUS_UPDATED 0x00000001 ++#define SD_STATUS_LINK_CHG 0x00000002 ++#define SD_STATUS_ERROR 0x00000004 ++ ++ volatile u32 status_tag; ++ ++#ifdef __BIG_ENDIAN ++ volatile u16 rx_consumer; ++ volatile u16 rx_jumbo_consumer; ++#else ++ volatile u16 rx_jumbo_consumer; ++ volatile u16 rx_consumer; ++#endif ++ ++#ifdef __BIG_ENDIAN ++ volatile u16 reserved; ++ volatile u16 rx_mini_consumer; ++#else ++ volatile u16 rx_mini_consumer; ++ volatile u16 reserved; ++#endif ++ struct { ++#ifdef __BIG_ENDIAN ++ volatile u16 tx_consumer; ++ volatile u16 rx_producer; ++#else ++ volatile u16 rx_producer; ++ volatile u16 tx_consumer; ++#endif ++ } idx[16]; ++}; ++ ++typedef struct { ++ u32 high, low; ++} tg3_stat64_t; ++ ++struct tg3_hw_stats { ++ u8 __reserved0[0x400-0x300]; ++ ++ /* Statistics maintained by Receive MAC. */ ++ tg3_stat64_t rx_octets; ++ u64 __reserved1; ++ tg3_stat64_t rx_fragments; ++ tg3_stat64_t rx_ucast_packets; ++ tg3_stat64_t rx_mcast_packets; ++ tg3_stat64_t rx_bcast_packets; ++ tg3_stat64_t rx_fcs_errors; ++ tg3_stat64_t rx_align_errors; ++ tg3_stat64_t rx_xon_pause_rcvd; ++ tg3_stat64_t rx_xoff_pause_rcvd; ++ tg3_stat64_t rx_mac_ctrl_rcvd; ++ tg3_stat64_t rx_xoff_entered; ++ tg3_stat64_t rx_frame_too_long_errors; ++ tg3_stat64_t rx_jabbers; ++ tg3_stat64_t rx_undersize_packets; ++ tg3_stat64_t rx_in_length_errors; ++ tg3_stat64_t rx_out_length_errors; ++ tg3_stat64_t rx_64_or_less_octet_packets; ++ tg3_stat64_t rx_65_to_127_octet_packets; ++ tg3_stat64_t rx_128_to_255_octet_packets; ++ tg3_stat64_t rx_256_to_511_octet_packets; ++ tg3_stat64_t rx_512_to_1023_octet_packets; ++ tg3_stat64_t rx_1024_to_1522_octet_packets; ++ tg3_stat64_t rx_1523_to_2047_octet_packets; ++ tg3_stat64_t rx_2048_to_4095_octet_packets; ++ tg3_stat64_t rx_4096_to_8191_octet_packets; ++ tg3_stat64_t rx_8192_to_9022_octet_packets; ++ ++ u64 __unused0[37]; ++ ++ /* Statistics maintained by Transmit MAC. */ ++ tg3_stat64_t tx_octets; ++ u64 __reserved2; ++ tg3_stat64_t tx_collisions; ++ tg3_stat64_t tx_xon_sent; ++ tg3_stat64_t tx_xoff_sent; ++ tg3_stat64_t tx_flow_control; ++ tg3_stat64_t tx_mac_errors; ++ tg3_stat64_t tx_single_collisions; ++ tg3_stat64_t tx_mult_collisions; ++ tg3_stat64_t tx_deferred; ++ u64 __reserved3; ++ tg3_stat64_t tx_excessive_collisions; ++ tg3_stat64_t tx_late_collisions; ++ tg3_stat64_t tx_collide_2times; ++ tg3_stat64_t tx_collide_3times; ++ tg3_stat64_t tx_collide_4times; ++ tg3_stat64_t tx_collide_5times; ++ tg3_stat64_t tx_collide_6times; ++ tg3_stat64_t tx_collide_7times; ++ tg3_stat64_t tx_collide_8times; ++ tg3_stat64_t tx_collide_9times; ++ tg3_stat64_t tx_collide_10times; ++ tg3_stat64_t tx_collide_11times; ++ tg3_stat64_t tx_collide_12times; ++ tg3_stat64_t tx_collide_13times; ++ tg3_stat64_t tx_collide_14times; ++ tg3_stat64_t tx_collide_15times; ++ tg3_stat64_t tx_ucast_packets; ++ tg3_stat64_t tx_mcast_packets; ++ tg3_stat64_t tx_bcast_packets; ++ tg3_stat64_t tx_carrier_sense_errors; ++ tg3_stat64_t tx_discards; ++ tg3_stat64_t tx_errors; ++ ++ u64 __unused1[31]; ++ ++ /* Statistics maintained by Receive List Placement. 
*/ ++ tg3_stat64_t COS_rx_packets[16]; ++ tg3_stat64_t COS_rx_filter_dropped; ++ tg3_stat64_t dma_writeq_full; ++ tg3_stat64_t dma_write_prioq_full; ++ tg3_stat64_t rxbds_empty; ++ tg3_stat64_t rx_discards; ++ tg3_stat64_t rx_errors; ++ tg3_stat64_t rx_threshold_hit; ++ ++ u64 __unused2[9]; ++ ++ /* Statistics maintained by Send Data Initiator. */ ++ tg3_stat64_t COS_out_packets[16]; ++ tg3_stat64_t dma_readq_full; ++ tg3_stat64_t dma_read_prioq_full; ++ tg3_stat64_t tx_comp_queue_full; ++ ++ /* Statistics maintained by Host Coalescing. */ ++ tg3_stat64_t ring_set_send_prod_index; ++ tg3_stat64_t ring_status_update; ++ tg3_stat64_t nic_irqs; ++ tg3_stat64_t nic_avoided_irqs; ++ tg3_stat64_t nic_tx_threshold_hit; ++ ++ /* NOT a part of the hardware statistics block format. ++ * These stats are here as storage for tg3_periodic_fetch_stats(). ++ */ ++ tg3_stat64_t mbuf_lwm_thresh_hit; ++ ++ u8 __reserved4[0xb00-0x9c8]; ++}; ++ ++#define TG3_SD_NUM_RECS 3 ++#define TG3_OCIR_LEN (sizeof(struct tg3_ocir)) ++#define TG3_OCIR_SIG_MAGIC 0x5253434f ++#define TG3_OCIR_FLAG_ACTIVE 0x00000001 ++ ++#define TG3_TEMP_CAUTION_OFFSET 0xc8 ++#define TG3_TEMP_MAX_OFFSET 0xcc ++#define TG3_TEMP_SENSOR_OFFSET 0xd4 ++ ++#define TG3_OCIR_DRVR_FEAT_CSUM 0x00000001 ++#define TG3_OCIR_DRVR_FEAT_TSO 0x00000002 ++#define TG3_OCIR_DRVR_FEAT_MASK 0xff ++ ++#define TG3_OCIR_REFRESH_TMR_OFF 0x00000008 ++#define TG3_OCIR_UPDATE_TMR_OFF 0x0000000c ++#define TG3_OCIR_PORT0_FLGS_OFF 0x0000002c ++ ++ ++ ++struct tg3_ocir { ++ u32 signature; ++ u16 version_flags; ++ u16 refresh_int; ++ u32 refresh_tmr; ++ u32 update_tmr; ++ u32 dst_base_addr; ++ u16 src_hdr_offset; ++ u16 src_hdr_length; ++ u16 src_data_offset; ++ u16 src_data_length; ++ u16 dst_hdr_offset; ++ u16 dst_data_offset; ++ u16 dst_reg_upd_offset; ++ u16 dst_sem_offset; ++ u32 reserved1[2]; ++ u32 port0_flags; ++ u32 port1_flags; ++ u32 port2_flags; ++ u32 port3_flags; ++ u32 reserved2[1]; ++}; ++ ++/* 'mapping' is superfluous as the chip does not write into ++ * the tx/rx post rings so we could just fetch it from there. ++ * But the cache behavior is better how we are doing it now. ++ */ ++struct ring_info { ++#ifdef BCM_HAS_BUILD_SKB ++ u8 *data; ++#else ++ struct sk_buff *data; ++#endif ++ DEFINE_DMA_UNMAP_ADDR(mapping); ++}; ++ ++struct tg3_tx_ring_info { ++ struct sk_buff *skb; ++ DEFINE_DMA_UNMAP_ADDR(mapping); ++ bool fragmented; ++}; ++ ++struct tg3_link_config { ++ /* Describes what we're trying to get. */ ++ u32 advertising; ++ u16 speed; ++ u8 duplex; ++ u8 autoneg; ++ u8 flowctrl; ++ ++ /* Describes what we actually have. */ ++ u8 active_flowctrl; ++ ++ u8 active_duplex; ++ u16 active_speed; ++ u32 rmt_adv; ++}; ++ ++struct tg3_bufmgr_config { ++ u32 mbuf_read_dma_low_water; ++ u32 mbuf_mac_rx_low_water; ++ u32 mbuf_high_water; ++ ++ u32 mbuf_read_dma_low_water_jumbo; ++ u32 mbuf_mac_rx_low_water_jumbo; ++ u32 mbuf_high_water_jumbo; ++ ++ u32 dma_low_water; ++ u32 dma_high_water; ++}; ++ ++struct tg3_ethtool_stats { ++ /* Statistics maintained by Receive MAC. 
*/ ++ u64 rx_octets; ++ u64 rx_fragments; ++ u64 rx_ucast_packets; ++ u64 rx_mcast_packets; ++ u64 rx_bcast_packets; ++ u64 rx_fcs_errors; ++ u64 rx_align_errors; ++ u64 rx_xon_pause_rcvd; ++ u64 rx_xoff_pause_rcvd; ++ u64 rx_mac_ctrl_rcvd; ++ u64 rx_xoff_entered; ++ u64 rx_frame_too_long_errors; ++ u64 rx_jabbers; ++ u64 rx_undersize_packets; ++ u64 rx_in_length_errors; ++ u64 rx_out_length_errors; ++ u64 rx_64_or_less_octet_packets; ++ u64 rx_65_to_127_octet_packets; ++ u64 rx_128_to_255_octet_packets; ++ u64 rx_256_to_511_octet_packets; ++ u64 rx_512_to_1023_octet_packets; ++ u64 rx_1024_to_1522_octet_packets; ++ u64 rx_1523_to_2047_octet_packets; ++ u64 rx_2048_to_4095_octet_packets; ++ u64 rx_4096_to_8191_octet_packets; ++ u64 rx_8192_to_9022_octet_packets; ++ ++ /* Statistics maintained by Transmit MAC. */ ++ u64 tx_octets; ++ u64 tx_collisions; ++ u64 tx_xon_sent; ++ u64 tx_xoff_sent; ++ u64 tx_flow_control; ++ u64 tx_mac_errors; ++ u64 tx_single_collisions; ++ u64 tx_mult_collisions; ++ u64 tx_deferred; ++ u64 tx_excessive_collisions; ++ u64 tx_late_collisions; ++ u64 tx_collide_2times; ++ u64 tx_collide_3times; ++ u64 tx_collide_4times; ++ u64 tx_collide_5times; ++ u64 tx_collide_6times; ++ u64 tx_collide_7times; ++ u64 tx_collide_8times; ++ u64 tx_collide_9times; ++ u64 tx_collide_10times; ++ u64 tx_collide_11times; ++ u64 tx_collide_12times; ++ u64 tx_collide_13times; ++ u64 tx_collide_14times; ++ u64 tx_collide_15times; ++ u64 tx_ucast_packets; ++ u64 tx_mcast_packets; ++ u64 tx_bcast_packets; ++ u64 tx_carrier_sense_errors; ++ u64 tx_discards; ++ u64 tx_errors; ++ ++ /* Statistics maintained by Receive List Placement. */ ++ u64 dma_writeq_full; ++ u64 dma_write_prioq_full; ++ u64 rxbds_empty; ++ u64 rx_discards; ++ u64 rx_errors; ++ u64 rx_threshold_hit; ++ ++ /* Statistics maintained by Send Data Initiator. */ ++ u64 dma_readq_full; ++ u64 dma_read_prioq_full; ++ u64 tx_comp_queue_full; ++ ++ /* Statistics maintained by Host Coalescing. 
*/ ++ u64 ring_set_send_prod_index; ++ u64 ring_status_update; ++ u64 nic_irqs; ++ u64 nic_avoided_irqs; ++ u64 nic_tx_threshold_hit; ++ ++ u64 mbuf_lwm_thresh_hit; ++ u64 dma_4g_cross; ++#if !defined(__VMKLNX__) ++ u64 recoverable_err; ++ u64 unrecoverable_err; ++#endif ++}; ++ ++#if defined(__VMKLNX__) ++#include "tg3_vmware.h" ++#endif ++ ++struct tg3_rx_prodring_set { ++#ifdef TG3_VMWARE_NETQ_ENABLE ++ u32 rx_std_mbox; ++ u32 rx_jmb_mbox; ++#endif ++ u32 rx_std_prod_idx; ++ u32 rx_std_cons_idx; ++ u32 rx_jmb_prod_idx; ++ u32 rx_jmb_cons_idx; ++ struct tg3_rx_buffer_desc *rx_std; ++ struct tg3_ext_rx_buffer_desc *rx_jmb; ++ struct ring_info *rx_std_buffers; ++ struct ring_info *rx_jmb_buffers; ++ dma_addr_t rx_std_mapping; ++ dma_addr_t rx_jmb_mapping; ++}; ++ ++#define TG3_RSS_MAX_NUM_QS 4 ++#define TG3_IRQ_MAX_VECS_RSS TG3_RSS_MAX_NUM_QS + 1 ++ ++#if defined(__VMKLNX__) ++#if defined(TG3_INBOX) ++ #define TG3_IRQ_MAX_VECS 1 ++#elif defined(TG3_VMWARE_NETQ_ENABLE) ++ #define TG3_IRQ_MAX_VECS_IOV 17 ++ #define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_IOV ++#endif ++#endif /* __VMKLNX__ */ ++ ++#ifndef TG3_IRQ_MAX_VECS ++#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS ++#endif ++ ++struct tg3_napi { ++#ifdef TG3_NAPI ++ struct napi_struct napi ____cacheline_aligned; ++#endif ++ struct tg3 *tp; ++ struct tg3_hw_status *hw_status; ++ ++ u32 chk_msi_cnt; ++ u32 last_tag; ++ u32 last_irq_tag; ++ u32 int_mbox; ++ u32 coal_now; ++ ++ u32 consmbox ____cacheline_aligned; ++ u32 rx_rcb_ptr; ++ u32 last_rx_cons; ++ volatile u16 *rx_rcb_prod_idx; ++ struct tg3_rx_prodring_set *srcprodring; ++ struct tg3_rx_prodring_set prodring; ++ struct tg3_rx_buffer_desc *rx_rcb; ++ ++ u32 tx_prod ____cacheline_aligned; ++ u32 tx_cons; ++ u32 tx_pending; ++ u32 last_tx_cons; ++ u32 prodmbox; ++ struct tg3_tx_buffer_desc *tx_ring; ++ struct tg3_tx_ring_info *tx_buffers; ++ ++ dma_addr_t status_mapping; ++ dma_addr_t rx_rcb_mapping; ++ dma_addr_t tx_desc_mapping; ++ ++ char irq_lbl[IFNAMSIZ]; ++ unsigned int irq_vec; ++ ++#if defined(__VMKLNX__) && !defined(TG3_VMWARE_NETQ_DISABLE) ++ struct tg3_netq_napi netq; ++#endif ++}; ++ ++enum TG3_FLAGS { ++ TG3_FLAG_TAGGED_STATUS = 0, ++ TG3_FLAG_TXD_MBOX_HWBUG, ++ TG3_FLAG_USE_LINKCHG_REG, ++ TG3_FLAG_ERROR_PROCESSED, ++ TG3_FLAG_ENABLE_ASF, ++ TG3_FLAG_ASPM_WORKAROUND, ++ TG3_FLAG_POLL_SERDES, ++ TG3_FLAG_POLL_CPMU_LINK, ++ TG3_FLAG_MBOX_WRITE_REORDER, ++ TG3_FLAG_PCIX_TARGET_HWBUG, ++ TG3_FLAG_WOL_SPEED_100MB, ++ TG3_FLAG_WOL_ENABLE, ++ TG3_FLAG_EEPROM_WRITE_PROT, ++ TG3_FLAG_NVRAM, ++ TG3_FLAG_NVRAM_BUFFERED, ++ TG3_FLAG_SUPPORT_MSI, ++ TG3_FLAG_SUPPORT_MSIX, ++ TG3_FLAG_PCIX_MODE, ++ TG3_FLAG_PCI_HIGH_SPEED, ++ TG3_FLAG_PCI_32BIT, ++ TG3_FLAG_SRAM_USE_CONFIG, ++ TG3_FLAG_TX_RECOVERY_PENDING, ++ TG3_FLAG_WOL_CAP, ++ TG3_FLAG_JUMBO_RING_ENABLE, ++ TG3_FLAG_PAUSE_AUTONEG, ++ TG3_FLAG_CPMU_PRESENT, ++ TG3_FLAG_40BIT_DMA_BUG, ++ TG3_FLAG_BROKEN_CHECKSUMS, ++ TG3_FLAG_JUMBO_CAPABLE, ++ TG3_FLAG_CHIP_RESETTING, ++ TG3_FLAG_INIT_COMPLETE, ++ TG3_FLAG_MAX_RXPEND_64, ++ TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */ ++ TG3_FLAG_ASF_NEW_HANDSHAKE, ++ TG3_FLAG_HW_AUTONEG, ++ TG3_FLAG_IS_NIC, ++ TG3_FLAG_FLASH, ++ TG3_FLAG_FW_TSO, ++ TG3_FLAG_HW_TSO_1, ++ TG3_FLAG_HW_TSO_2, ++ TG3_FLAG_HW_TSO_3, ++ TG3_FLAG_TSO_CAPABLE, ++ TG3_FLAG_TSO_BUG, ++ TG3_FLAG_USING_MSI, ++ TG3_FLAG_USING_MSIX, ++ TG3_FLAG_ICH_WORKAROUND, ++ TG3_FLAG_1SHOT_MSI, ++ TG3_FLAG_NO_FWARE_REPORTED, ++ TG3_FLAG_NO_NVRAM_ADDR_TRANS, ++ TG3_FLAG_ENABLE_APE, ++ TG3_FLAG_PROTECTED_NVRAM, ++ TG3_FLAG_5701_DMA_BUG, ++ 
TG3_FLAG_USE_PHYLIB, ++ TG3_FLAG_MDIOBUS_INITED, ++ TG3_FLAG_LRG_PROD_RING_CAP, ++ TG3_FLAG_RGMII_INBAND_DISABLE, ++ TG3_FLAG_RGMII_EXT_IBND_RX_EN, ++ TG3_FLAG_RGMII_EXT_IBND_TX_EN, ++ TG3_FLAG_CLKREQ_BUG, ++ TG3_FLAG_NO_NVRAM, ++ TG3_FLAG_ENABLE_RSS, ++ TG3_FLAG_ENABLE_TSS, ++ TG3_FLAG_SHORT_DMA_BUG, ++ TG3_FLAG_USE_JUMBO_BDFLAG, ++ TG3_FLAG_L1PLLPD_EN, ++ TG3_FLAG_APE_HAS_NCSI, ++ TG3_FLAG_TX_TSTAMP_EN, ++ TG3_FLAG_4K_FIFO_LIMIT, ++ TG3_FLAG_NO_TSO_BD_LIMIT, ++ TG3_FLAG_5719_5720_RDMA_BUG, ++ TG3_FLAG_RESET_TASK_PENDING, ++ TG3_FLAG_USER_INDIR_TBL, ++ TG3_FLAG_PTP_CAPABLE, ++ TG3_FLAG_5705_PLUS, ++ TG3_FLAG_IS_5788, ++ TG3_FLAG_5750_PLUS, ++ TG3_FLAG_5780_CLASS, ++ TG3_FLAG_5755_PLUS, ++ TG3_FLAG_57765_PLUS, ++ TG3_FLAG_57765_CLASS, ++ TG3_FLAG_5717_PLUS, ++ TG3_FLAG_IS_SSB_CORE, ++ TG3_FLAG_FLUSH_POSTED_WRITES, ++ TG3_FLAG_ROBOSWITCH, ++ TG3_FLAG_ONE_DMA_AT_ONCE, ++ TG3_FLAG_RGMII_MODE, ++ ++ TG3_FLAG_IOV_CAPABLE, ++ TG3_FLAG_ENABLE_IOV, ++ ++ /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */ ++ TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ ++}; ++ ++struct tg3 { ++ /* begin "general, frequently-used members" cacheline section */ ++ ++ /* If the IRQ handler (which runs lockless) needs to be ++ * quiesced, the following bitmask state is used. The ++ * SYNC flag is set by non-IRQ context code to initiate ++ * the quiescence. ++ * ++ * When the IRQ handler notices that SYNC is set, it ++ * disables interrupts and returns. ++ * ++ * When all outstanding IRQ handlers have returned after ++ * the SYNC flag has been set, the setter can be assured ++ * that interrupts will no longer get run. ++ * ++ * In this way all SMP driver locks are never acquired ++ * in hw IRQ context, only sw IRQ context or lower. ++ */ ++ unsigned int irq_sync; ++ ++ /* SMP locking strategy: ++ * ++ * lock: Held during reset, PHY access, timer, and when ++ * updating tg3_flags. ++ * ++ * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds ++ * netif_tx_lock when it needs to call ++ * netif_wake_queue. ++ * ++ * Both of these locks are to be held with BH safety. ++ * ++ * Because the IRQ handler, tg3_poll, and tg3_start_xmit ++ * are running lockless, it is necessary to completely ++ * quiesce the chip with tg3_netif_stop and tg3_full_lock ++ * before reconfiguring the device. ++ * ++ * indirect_lock: Held when accessing registers indirectly ++ * with IRQ disabling. 
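++ *
++ * A minimal sketch of the indirect_lock pattern (assuming the
++ * TG3PCI_REG_BASE_ADDR and TG3PCI_REG_DATA config-space window
++ * registers defined elsewhere in this patch); an indirect register
++ * write is roughly:
++ *
++ *	spin_lock_irqsave(&tp->indirect_lock, flags);
++ *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
++ *	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
++ *	spin_unlock_irqrestore(&tp->indirect_lock, flags);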
++ */ ++ spinlock_t lock; ++ spinlock_t indirect_lock; ++ ++ u32 (*read32) (struct tg3 *, u32); ++ void (*write32) (struct tg3 *, u32, u32); ++ u32 (*read32_mbox) (struct tg3 *, u32); ++ void (*write32_mbox) (struct tg3 *, u32, ++ u32); ++ void __iomem *regs; ++ void __iomem *aperegs; ++ struct net_device *dev; ++ struct pci_dev *pdev; ++ ++ u32 coal_now; ++ u32 msg_enable; ++ ++#ifdef BCM_HAS_IEEE1588_SUPPORT ++#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) ++ struct ptp_clock_info ptp_info; ++ struct ptp_clock *ptp_clock; ++ s64 ptp_adjust; ++#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ ++ struct cyclecounter cycles; ++ struct timecounter clock; ++ struct timecompare compare; ++#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ ++#endif /* BCM_HAS_IEEE1588_SUPPORT */ ++ ++ /* begin "tx thread" cacheline section */ ++ void (*write32_tx_mbox) (struct tg3 *, u32, ++ u32); ++ u32 dma_limit; ++ u32 txq_req; ++ u32 txq_cnt; ++ u32 txq_max; ++ ++ /* begin "rx thread" cacheline section */ ++ struct tg3_napi napi[TG3_IRQ_MAX_VECS]; ++ void (*write32_rx_mbox) (struct tg3 *, u32, ++ u32); ++ u32 rx_copy_thresh; ++ u32 rx_std_ring_mask; ++ u32 rx_jmb_ring_mask; ++ u32 rx_ret_ring_mask; ++ u32 rx_pending; ++ u32 rx_jumbo_pending; ++ u32 rx_std_max_post; ++ u32 rx_offset; ++ u32 rx_pkt_map_sz; ++ u32 rxq_req; ++ u32 rxq_cnt; ++ u32 rxq_max; ++#ifndef BCM_HAS_NEW_VLAN_INTERFACE ++ struct vlan_group *vlgrp; ++#endif ++ ++ bool rx_refill; ++ ++ /* begin "everything else" cacheline(s) section */ ++ unsigned long rx_dropped; ++ unsigned long tx_dropped; ++ struct rtnl_link_stats64 net_stats_prev; ++ struct tg3_ethtool_stats estats_prev; ++ ++ DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS); ++ ++ union { ++ unsigned long phy_crc_errors; ++ unsigned long last_event_jiffies; ++ }; ++ ++ struct timer_list timer; ++ u16 timer_counter; ++ u16 timer_multiplier; ++ u32 timer_offset; ++ u16 asf_counter; ++ u16 asf_multiplier; ++ ++ /* 1 second counter for transient serdes link events */ ++ u32 serdes_counter; ++#define SERDES_AN_TIMEOUT_5704S 2 ++#define SERDES_PARALLEL_DET_TIMEOUT 1 ++#define SERDES_AN_TIMEOUT_5714S 1 ++ ++ struct tg3_link_config link_config; ++ struct tg3_bufmgr_config bufmgr_config; ++ ++ /* cache h/w values, often passed straight to h/w */ ++ u32 rx_mode; ++ u32 tx_mode; ++ u32 mac_mode; ++ u32 mi_mode; ++ u32 misc_host_ctrl; ++ u32 grc_mode; ++ u32 grc_local_ctrl; ++ u32 dma_rwctrl; ++ u32 coalesce_mode; ++ u32 pwrmgmt_thresh; ++ u32 rxptpctl; ++ ++ /* PCI block */ ++ u32 pci_chip_rev_id; ++ u16 pci_cmd; ++ u8 pci_cacheline_sz; ++ u8 pci_lat_timer; ++ ++ int pci_fn; ++ int pm_cap; ++ int msi_cap; ++ int pcix_cap; ++ int pcie_readrq; ++ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ struct mii_bus *mdio_bus; ++ int mdio_irq[PHY_MAX_ADDR]; ++#endif ++ int old_link; ++ ++ u8 phy_addr; ++ u8 phy_ape_lock; ++ ++ /* PHY info */ ++ u32 phy_id; ++#define TG3_PHY_ID_MASK 0xfffffff0 ++#define TG3_PHY_ID_BCM5400 0x60008040 ++#define TG3_PHY_ID_BCM5401 0x60008050 ++#define TG3_PHY_ID_BCM5411 0x60008070 ++#define TG3_PHY_ID_BCM5701 0x60008110 ++#define TG3_PHY_ID_BCM5703 0x60008160 ++#define TG3_PHY_ID_BCM5704 0x60008190 ++#define TG3_PHY_ID_BCM5705 0x600081a0 ++#define TG3_PHY_ID_BCM5750 0x60008180 ++#define TG3_PHY_ID_BCM5752 0x60008100 ++#define TG3_PHY_ID_BCM5714 0x60008340 ++#define TG3_PHY_ID_BCM5780 0x60008350 ++#define TG3_PHY_ID_BCM5755 0xbc050cc0 ++#define TG3_PHY_ID_BCM5787 0xbc050ce0 ++#define TG3_PHY_ID_BCM5756 0xbc050ed0 ++#define TG3_PHY_ID_BCM5784 0xbc050fa0 ++#define TG3_PHY_ID_BCM5761 0xbc050fd0 
++#define TG3_PHY_ID_BCM5718C 0x5c0d8a00 ++#define TG3_PHY_ID_BCM5718S 0xbc050ff0 ++#define TG3_PHY_ID_BCM57765 0x5c0d8a40 ++#define TG3_PHY_ID_BCM5719C 0x5c0d8a20 ++#define TG3_PHY_ID_BCM5720C 0x5c0d8b60 ++#define TG3_PHY_ID_BCM5762 0x85803780 ++#define TG3_PHY_ID_BCM5906 0xdc00ac40 ++#define TG3_PHY_ID_BCM8002 0x60010140 ++#ifndef BCM_INCLUDE_PHYLIB_SUPPORT ++#define TG3_PHY_ID_BCM50610 0xbc050d60 ++#define TG3_PHY_ID_BCM50610M 0xbc050d70 ++#define TG3_PHY_ID_BCM50612E 0x5c0d8a60 ++#define TG3_PHY_ID_BCMAC131 0xbc050c70 ++#define TG3_PHY_ID_RTL8211C 0xc8007110 ++#define TG3_PHY_ID_RTL8201E 0xc800aaa0 ++#define TG3_PHY_ID_BCM57780 0x5c0d8990 ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++#define TG3_PHY_ID_INVALID 0xffffffff ++ ++#define PHY_ID_RTL8211C 0x001cc910 ++#define PHY_ID_RTL8201E 0x00008200 ++ ++#define TG3_PHY_ID_REV_MASK 0x0000000f ++#define TG3_PHY_REV_BCM5401_B0 0x1 ++ ++ /* This macro assumes the passed PHY ID is ++ * already masked with TG3_PHY_ID_MASK. ++ */ ++#define TG3_KNOWN_PHY_ID(X) \ ++ ((X) == TG3_PHY_ID_BCM5400 || (X) == TG3_PHY_ID_BCM5401 || \ ++ (X) == TG3_PHY_ID_BCM5411 || (X) == TG3_PHY_ID_BCM5701 || \ ++ (X) == TG3_PHY_ID_BCM5703 || (X) == TG3_PHY_ID_BCM5704 || \ ++ (X) == TG3_PHY_ID_BCM5705 || (X) == TG3_PHY_ID_BCM5750 || \ ++ (X) == TG3_PHY_ID_BCM5752 || (X) == TG3_PHY_ID_BCM5714 || \ ++ (X) == TG3_PHY_ID_BCM5780 || (X) == TG3_PHY_ID_BCM5787 || \ ++ (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \ ++ (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \ ++ (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \ ++ (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \ ++ (X) == TG3_PHY_ID_BCM5720C || (X) == TG3_PHY_ID_BCM5762 || \ ++ (X) == TG3_PHY_ID_BCM8002 || \ ++ (X) == TG3_PHY_ID_BCM50610 || (X) == TG3_PHY_ID_BCM50610M || \ ++ (X) == TG3_PHY_ID_BCM50612E || (X) == TG3_PHY_ID_BCMAC131 || \ ++ (X) == TG3_PHY_ID_BCM57780) ++ ++ u32 phy_flags; ++#define TG3_PHYFLG_USER_CONFIGURED 0x00000001 ++#define TG3_PHYFLG_IS_LOW_POWER 0x00000002 ++#define TG3_PHYFLG_IS_CONNECTED 0x00000004 ++#define TG3_PHYFLG_USE_MI_INTERRUPT 0x00000008 ++#define TG3_PHYFLG_PHY_SERDES 0x00000010 ++#define TG3_PHYFLG_MII_SERDES 0x00000020 ++#define TG3_PHYFLG_ANY_SERDES (TG3_PHYFLG_PHY_SERDES | \ ++ TG3_PHYFLG_MII_SERDES) ++#define TG3_PHYFLG_IS_FET 0x00000040 ++#define TG3_PHYFLG_10_100_ONLY 0x00000080 ++#define TG3_PHYFLG_ENABLE_APD 0x00000100 ++#define TG3_PHYFLG_CAPACITIVE_COUPLING 0x00000200 ++#define TG3_PHYFLG_NO_ETH_WIRE_SPEED 0x00000400 ++#define TG3_PHYFLG_JITTER_BUG 0x00000800 ++#define TG3_PHYFLG_ADJUST_TRIM 0x00001000 ++#define TG3_PHYFLG_ADC_BUG 0x00002000 ++#define TG3_PHYFLG_5704_A0_BUG 0x00004000 ++#define TG3_PHYFLG_BER_BUG 0x00008000 ++#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000 ++#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000 ++#define TG3_PHYFLG_EEE_CAP 0x00040000 ++#define TG3_PHYFLG_1G_ON_VAUX_OK 0x00080000 ++#define TG3_PHYFLG_KEEP_LINK_ON_PWRDN 0x00100000 ++#define TG3_PHYFLG_MDIX_STATE 0x00200000 ++#define TG3_PHYFLG_DISABLE_1G_HD_ADV 0x00400000 ++ ++ u32 led_ctrl; ++ u32 phy_otp; ++ u32 setlpicnt; ++ u8 rss_ind_tbl[TG3_RSS_INDIR_TBL_SIZE]; ++ ++#define TG3_BPN_SIZE 24 ++ char board_part_number[TG3_BPN_SIZE]; ++#define TG3_VER_SIZE ETHTOOL_FWVERS_LEN ++ char fw_ver[TG3_VER_SIZE]; ++ u32 nic_sram_data_cfg; ++ u32 pci_clock_ctrl; ++ struct pci_dev *pdev_peer; ++ ++ struct tg3_hw_stats *hw_stats; ++ dma_addr_t stats_mapping; ++ struct work_struct reset_task; ++ ++ int nvram_lock_cnt; ++ u32 nvram_size; ++#define TG3_NVRAM_SIZE_2KB 
0x00000800 ++#define TG3_NVRAM_SIZE_64KB 0x00010000 ++#define TG3_NVRAM_SIZE_128KB 0x00020000 ++#define TG3_NVRAM_SIZE_256KB 0x00040000 ++#define TG3_NVRAM_SIZE_512KB 0x00080000 ++#define TG3_NVRAM_SIZE_1MB 0x00100000 ++#define TG3_NVRAM_SIZE_2MB 0x00200000 ++ ++ u32 nvram_pagesize; ++ u32 nvram_jedecnum; ++ ++#define JEDEC_ATMEL 0x1f ++#define JEDEC_ST 0x20 ++#define JEDEC_SAIFUN 0x4f ++#define JEDEC_SST 0xbf ++#define JEDEC_MACRONIX 0xc2 ++ ++#define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB ++#define ATMEL_AT24C02_PAGE_SIZE (8) ++ ++#define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB ++#define ATMEL_AT24C64_PAGE_SIZE (32) ++ ++#define ATMEL_AT24C512_CHIP_SIZE TG3_NVRAM_SIZE_512KB ++#define ATMEL_AT24C512_PAGE_SIZE (128) ++ ++#define ATMEL_AT45DB0X1B_PAGE_POS 9 ++#define ATMEL_AT45DB0X1B_PAGE_SIZE 264 ++ ++#define ATMEL_AT25F512_PAGE_SIZE 256 ++ ++#define ST_M45PEX0_PAGE_SIZE 256 ++ ++#define SAIFUN_SA25F0XX_PAGE_SIZE 256 ++ ++#define SST_25VF0X0_PAGE_SIZE 4098 ++ ++ unsigned int irq_max; ++ unsigned int irq_cnt; ++ ++ struct ethtool_coalesce coal; ++ struct ethtool_eee eee; ++ ++ /* firmware info */ ++ const char *fw_needed; ++ const struct tg3_firmware *fw; ++ u32 fw_len; /* includes BSS */ ++ ++#if defined(__VMKLNX__) ++ struct tg3_vmware vmware; ++#endif ++#ifndef BCM_HAS_PCI_PCIE_CAP ++ int pcie_cap; ++#endif ++#if (LINUX_VERSION_CODE < 0x2060a) ++ u32 pci_cfg_state[64 / sizeof(u32)]; ++#endif ++#ifndef BCM_HAS_GET_STATS64 ++ struct rtnl_link_stats64 net_stats; ++#endif ++#if IS_ENABLED(CONFIG_HWMON) && !defined(__VMKLNX__) ++#if (LINUX_VERSION_CODE > 0x20618) ++ struct device *hwmon_dev; ++#else ++ struct class_device *hwmon_dev; ++#endif ++#endif ++ ++ bool link_up; ++#if defined(__VMKLNX__) && VMWARE_ESX_DDK_VERSION >= 55000 ++ int nic_idx; ++#endif ++ u32 ape_hb; ++ unsigned long ape_hb_interval; ++ unsigned long ape_hb_jiffies; ++ unsigned long dma_4g_cross; ++#if !defined(__VMKLNX__) ++ unsigned long recoverable_err_jiffies; ++#define RECOVERABLE_ERR_10SEC 10000 ++ unsigned long recoverable_err_interval; ++ u64 recoverable_err; ++ u64 unrecoverable_err; ++#endif ++}; ++ ++/* Accessor macros for chip and asic attributes ++ * ++ * nb: Using static inlines equivalent to the accessor macros generates ++ * larger object code with gcc 4.7. ++ * Using statement expression macros to check tp with ++ * typecheck(struct tg3 *, tp) also creates larger objects. ++ */ ++#define tg3_chip_rev_id(tp) \ ++ ((tp)->pci_chip_rev_id) ++#define tg3_asic_rev(tp) \ ++ ((tp)->pci_chip_rev_id >> 12) ++#define tg3_chip_rev(tp) \ ++ ((tp)->pci_chip_rev_id >> 8) ++ ++#endif /* !(_T3_H) */ +diff --git a/drivers/net/ethernet/broadcom/tg3/tg3_compat.h b/drivers/net/ethernet/broadcom/tg3/tg3_compat.h +new file mode 100644 +index 0000000..40cc207 +--- /dev/null ++++ b/drivers/net/ethernet/broadcom/tg3/tg3_compat.h +@@ -0,0 +1,2172 @@ ++/* Copyright (C) 2008-2015 Broadcom Corporation. 
*/ ++ ++#ifdef CONFIG_X86 ++#undef NET_IP_ALIGN ++#define NET_IP_ALIGN 0 ++#endif ++ ++#if !defined(__maybe_unused) ++#define __maybe_unused /* unimplemented */ ++#endif ++ ++#if !defined(__iomem) ++#define __iomem ++#endif ++ ++#ifndef __always_unused ++#define __always_unused ++#endif ++ ++#ifndef __acquires ++#define __acquires(x) ++#endif ++ ++#ifndef __releases ++#define __releases(x) ++#endif ++ ++#ifndef mmiowb ++#define mmiowb() ++#endif ++ ++#ifndef WARN_ON ++#define WARN_ON(x) ++#endif ++ ++#ifndef MODULE_VERSION ++#define MODULE_VERSION(version) ++#endif ++ ++#ifndef SET_MODULE_OWNER ++#define SET_MODULE_OWNER(dev) do { } while (0) ++#endif ++ ++#ifndef ARRAY_SIZE ++#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) ++#endif ++ ++#ifndef DIV_ROUND_UP ++#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) ++#endif ++ ++#ifndef __ALIGN_MASK ++#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) ++#endif ++ ++#ifndef ALIGN ++#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) ++#endif ++ ++#ifndef BCM_HAS_BOOL ++typedef int bool; ++#define false 0 ++#define true 1 ++#endif ++ ++#ifndef BCM_HAS_LE32 ++typedef u32 __le32; ++typedef u32 __be32; ++#endif ++ ++#ifndef BCM_HAS_RESOURCE_SIZE_T ++typedef unsigned long resource_size_t; ++#endif ++ ++#ifndef IRQ_RETVAL ++typedef void irqreturn_t; ++#define IRQ_RETVAL(x) ++#define IRQ_HANDLED ++#define IRQ_NONE ++#endif ++ ++#ifndef IRQF_SHARED ++#define IRQF_SHARED SA_SHIRQ ++#endif ++ ++#ifndef IRQF_SAMPLE_RANDOM ++#define IRQF_SAMPLE_RANDOM SA_SAMPLE_RANDOM ++#endif ++ ++#if (LINUX_VERSION_CODE <= 0x020600) ++#define schedule_work(x) schedule_task(x) ++#define work_struct tq_struct ++#define INIT_WORK(x, y, z) INIT_TQUEUE(x, y, z) ++#endif ++ ++#ifndef BCM_HAS_KZALLOC ++static inline void *kzalloc(size_t size, int flags) ++{ ++ void * memptr = kmalloc(size, flags); ++ if (memptr) ++ memset(memptr, 0, size); ++ ++ return memptr; ++} ++#endif ++ ++#ifndef USEC_PER_SEC ++#define USEC_PER_SEC 1000000 ++#endif ++ ++#ifndef MSEC_PER_SEC ++#define MSEC_PER_SEC 1000 ++#endif ++ ++#ifndef MAX_JIFFY_OFFSET ++#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1)-1) ++#endif ++ ++#ifndef BCM_HAS_JIFFIES_TO_USECS ++static unsigned int inline jiffies_to_usecs(const unsigned long j) ++{ ++#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) ++ return (USEC_PER_SEC / HZ) * j; ++#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) ++ return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC); ++#else ++ return (j * USEC_PER_SEC) / HZ; ++#endif ++} ++#endif /* BCM_HAS_JIFFIES_TO_USECS */ ++ ++#ifndef BCM_HAS_USECS_TO_JIFFIES ++static unsigned long usecs_to_jiffies(const unsigned int u) ++{ ++ if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) ++ return MAX_JIFFY_OFFSET; ++#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) ++ return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); ++#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) ++ return u * (HZ / USEC_PER_SEC); ++#else ++ return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC; ++#endif ++} ++#endif /* BCM_HAS_USECS_TO_JIFFIES */ ++ ++#ifndef BCM_HAS_MSECS_TO_JIFFIES ++static unsigned long msecs_to_jiffies(const unsigned int m) ++{ ++#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) ++ /* ++ * HZ is equal to or smaller than 1000, and 1000 is a nice ++ * round multiple of HZ, divide with the factor between them, ++ * but round upwards: ++ */ ++ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); ++#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) ++ /* ++ * HZ is larger than 1000, and HZ is a nice round multiple of ++ * 1000 - simply 
multiply with the factor between them. ++ * ++ * But first make sure the multiplication result cannot ++ * overflow: ++ */ ++ if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) ++ return MAX_JIFFY_OFFSET; ++ ++ return m * (HZ / MSEC_PER_SEC); ++#else ++ /* ++ * Generic case - multiply, round and divide. But first ++ * check that if we are doing a net multiplication, that ++ * we wouldn't overflow: ++ */ ++ if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) ++ return MAX_JIFFY_OFFSET; ++ ++ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; ++#endif ++} ++#endif /* BCM_HAS_MSECS_TO_JIFFIES */ ++ ++#ifndef BCM_HAS_MSLEEP ++static void msleep(unsigned int msecs) ++{ ++ unsigned long timeout = msecs_to_jiffies(msecs) + 1; ++ ++ while (timeout) { ++ __set_current_state(TASK_UNINTERRUPTIBLE); ++ timeout = schedule_timeout(timeout); ++ } ++} ++#endif /* BCM_HAS_MSLEEP */ ++ ++#ifndef BCM_HAS_MSLEEP_INTERRUPTIBLE ++static unsigned long msleep_interruptible(unsigned int msecs) ++{ ++ unsigned long timeout = msecs_to_jiffies(msecs) + 1; ++ ++ while (timeout) { ++ __set_current_state(TASK_UNINTERRUPTIBLE); ++ timeout = schedule_timeout(timeout); ++ } ++ ++ return 0; ++} ++#endif /* BCM_HAS_MSLEEP_INTERRUPTIBLE */ ++ ++#ifndef printk_once ++#define printk_once(x...) ({ \ ++ static bool tg3___print_once = false; \ ++ \ ++ if (!tg3___print_once) { \ ++ tg3___print_once = true; \ ++ printk(x); \ ++ } \ ++}) ++#endif ++ ++#if !defined(BCM_HAS_DEV_DRIVER_STRING) || defined(__VMKLNX__) ++#define dev_driver_string(dev) "tg3" ++#endif ++ ++#if !defined(BCM_HAS_DEV_NAME) || defined(__VMKLNX__) ++#define dev_name(dev) "" ++#endif ++ ++#if defined(dev_printk) && ((LINUX_VERSION_CODE < 0x020609) || defined(__VMKLNX__)) ++/* ++ * SLES 9 and VMWare do not populate the pdev->dev.bus_id string soon ++ * enough for driver use during boot. Use our own format instead. ++ */ ++#undef dev_printk ++#endif ++ ++#ifndef dev_printk ++#define dev_printk(level, dev, format, arg...) \ ++ printk(level "%s %s: " format , dev_driver_string(dev) , \ ++ dev_name(dev) , ## arg) ++#endif ++ ++#ifndef dev_err ++#define dev_err(dev, format, arg...) \ ++ dev_printk(KERN_ERR , dev , format , ## arg) ++#endif ++ ++#ifndef dev_warn ++#define dev_warn(dev, format, arg...) 
\ ++ dev_printk(KERN_WARNING , dev , format , ## arg) ++#endif ++ ++#ifndef BCM_HAS_PCI_IOREMAP_BAR ++static inline void * pci_ioremap_bar(struct pci_dev *pdev, int bar) ++{ ++ resource_size_t base, size; ++ ++ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { ++ printk(KERN_ERR ++ "Cannot find proper PCI device base address for BAR %d.\n", ++ bar); ++ return NULL; ++ } ++ ++ base = pci_resource_start(pdev, bar); ++ size = pci_resource_len(pdev, bar); ++ ++ return ioremap_nocache(base, size); ++} ++#endif ++ ++#ifndef DEFINE_PCI_DEVICE_TABLE ++#define DEFINE_PCI_DEVICE_TABLE(x) struct pci_device_id x[] ++#endif ++ ++#if (LINUX_VERSION_CODE < 0x020547) ++#define pci_set_consistent_dma_mask(pdev, mask) (0) ++#endif ++ ++#if (LINUX_VERSION_CODE < 0x020600) ++#define pci_get_device(x, y, z) pci_find_device(x, y, z) ++#define pci_get_slot(x, y) pci_find_slot((x)->number, y) ++#define pci_dev_put(x) ++#endif ++ ++#if (LINUX_VERSION_CODE < 0x020605) ++#define pci_dma_sync_single_for_cpu(pdev, map, len, dir) \ ++ pci_dma_sync_single(pdev, map, len, dir) ++#define pci_dma_sync_single_for_device(pdev, map, len, dir) ++#endif ++ ++#ifndef PCI_DEVICE ++#define PCI_DEVICE(vend,dev) \ ++ .vendor = (vend), .device = (dev), \ ++ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID ++#endif ++ ++#ifndef PCI_DEVICE_SUB ++#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \ ++ .vendor = (vend), .device = (dev), \ ++ .subvendor = (subvend), .subdevice = (subdev) ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5704S_2 ++#define PCI_DEVICE_ID_TIGON3_5704S_2 0x1649 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5705F ++#define PCI_DEVICE_ID_TIGON3_5705F 0x166e ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5720 ++#define PCI_DEVICE_ID_TIGON3_5720 0x1658 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5721 ++#define PCI_DEVICE_ID_TIGON3_5721 0x1659 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5750 ++#define PCI_DEVICE_ID_TIGON3_5750 0x1676 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5751 ++#define PCI_DEVICE_ID_TIGON3_5751 0x1677 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5750M ++#define PCI_DEVICE_ID_TIGON3_5750M 0x167c ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5751M ++#define PCI_DEVICE_ID_TIGON3_5751M 0x167d ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5751F ++#define PCI_DEVICE_ID_TIGON3_5751F 0x167e ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5789 ++#define PCI_DEVICE_ID_TIGON3_5789 0x169d ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5753 ++#define PCI_DEVICE_ID_TIGON3_5753 0x16f7 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5753M ++#define PCI_DEVICE_ID_TIGON3_5753M 0x16fd ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5753F ++#define PCI_DEVICE_ID_TIGON3_5753F 0x16fe ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5781 ++#define PCI_DEVICE_ID_TIGON3_5781 0x16dd ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5752 ++#define PCI_DEVICE_ID_TIGON3_5752 0x1600 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5752M ++#define PCI_DEVICE_ID_TIGON3_5752M 0x1601 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5714 ++#define PCI_DEVICE_ID_TIGON3_5714 0x1668 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5714S ++#define PCI_DEVICE_ID_TIGON3_5714S 0x1669 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5780 ++#define PCI_DEVICE_ID_TIGON3_5780 0x166a ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5780S ++#define PCI_DEVICE_ID_TIGON3_5780S 0x166b ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5715 ++#define PCI_DEVICE_ID_TIGON3_5715 0x1678 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5715S ++#define PCI_DEVICE_ID_TIGON3_5715S 0x1679 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5756 
++#define PCI_DEVICE_ID_TIGON3_5756 0x1674 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5754 ++#define PCI_DEVICE_ID_TIGON3_5754 0x167a ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5754M ++#define PCI_DEVICE_ID_TIGON3_5754M 0x1672 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5755 ++#define PCI_DEVICE_ID_TIGON3_5755 0x167b ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5755M ++#define PCI_DEVICE_ID_TIGON3_5755M 0x1673 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5722 ++#define PCI_DEVICE_ID_TIGON3_5722 0x165a ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5786 ++#define PCI_DEVICE_ID_TIGON3_5786 0x169a ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5787M ++#define PCI_DEVICE_ID_TIGON3_5787M 0x1693 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5787 ++#define PCI_DEVICE_ID_TIGON3_5787 0x169b ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5787F ++#define PCI_DEVICE_ID_TIGON3_5787F 0x167f ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5906 ++#define PCI_DEVICE_ID_TIGON3_5906 0x1712 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5906M ++#define PCI_DEVICE_ID_TIGON3_5906M 0x1713 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5784 ++#define PCI_DEVICE_ID_TIGON3_5784 0x1698 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5764 ++#define PCI_DEVICE_ID_TIGON3_5764 0x1684 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5723 ++#define PCI_DEVICE_ID_TIGON3_5723 0x165b ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5761 ++#define PCI_DEVICE_ID_TIGON3_5761 0x1681 ++#endif ++ ++#ifndef PCI_DEVICE_ID_TIGON3_5761E ++#define PCI_DEVICE_ID_TIGON3_5761E 0x1680 ++#endif ++ ++#ifndef PCI_DEVICE_ID_APPLE_TIGON3 ++#define PCI_DEVICE_ID_APPLE_TIGON3 0x1645 ++#endif ++ ++#ifndef PCI_DEVICE_ID_APPLE_UNI_N_PCI15 ++#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e ++#endif ++ ++#ifndef PCI_DEVICE_ID_VIA_8385_0 ++#define PCI_DEVICE_ID_VIA_8385_0 0x3188 ++#endif ++ ++#ifndef PCI_DEVICE_ID_AMD_8131_BRIDGE ++#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 ++#endif ++ ++#ifndef PCI_DEVICE_ID_SERVERWORKS_EPB ++#define PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103 ++#endif ++ ++#ifndef PCI_VENDOR_ID_ARIMA ++#define PCI_VENDOR_ID_ARIMA 0x161f ++#endif ++ ++#ifndef PCI_DEVICE_ID_INTEL_PXH_0 ++#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329 ++#endif ++ ++#ifndef PCI_DEVICE_ID_INTEL_PXH_1 ++#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A ++#endif ++ ++#ifndef PCI_VENDOR_ID_LENOVO ++#define PCI_VENDOR_ID_LENOVO 0x17aa ++#endif ++ ++#ifndef PCI_D0 ++typedef u32 pm_message_t; ++typedef u32 pci_power_t; ++#define PCI_D0 0 ++#define PCI_D1 1 ++#define PCI_D2 2 ++#define PCI_D3hot 3 ++#endif ++ ++#ifndef PCI_D3cold ++#define PCI_D3cold 4 ++#endif ++ ++#ifndef DMA_64BIT_MASK ++#define DMA_64BIT_MASK ((u64) 0xffffffffffffffffULL) ++#endif ++ ++#ifndef DMA_40BIT_MASK ++#define DMA_40BIT_MASK ((u64) 0x000000ffffffffffULL) ++#endif ++ ++#ifndef DMA_32BIT_MASK ++#define DMA_32BIT_MASK ((u64) 0x00000000ffffffffULL) ++#endif ++ ++#ifndef DMA_BIT_MASK ++#define DMA_BIT_MASK(n) DMA_ ##n ##BIT_MASK ++#endif ++ ++#ifndef DEFINE_DMA_UNMAP_ADDR ++#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR ++#endif ++ ++#if !defined(BCM_HAS_DMA_UNMAP_ADDR) ++#define dma_unmap_addr pci_unmap_addr ++#endif ++ ++#if !defined(BCM_HAS_DMA_UNMAP_ADDR_SET) ++#define dma_unmap_addr_set pci_unmap_addr_set ++#endif ++ ++#if !defined(BCM_HAS_PCI_TARGET_STATE) && !defined(BCM_HAS_PCI_CHOOSE_STATE) ++static inline pci_power_t pci_choose_state(struct pci_dev *dev, ++ pm_message_t state) ++{ ++ return state; ++} ++#endif ++ ++#ifndef BCM_HAS_PCI_ENABLE_WAKE ++static int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) ++{ ++ int 
pm_cap; ++ u16 pmcsr; ++ ++ pm_cap = pci_find_capability(dev, PCI_CAP_ID_PM); ++ if (pm_cap == 0) ++ return -EIO; ++ ++ pci_read_config_word(dev, pm_cap + PCI_PM_CTRL, &pmcsr); ++ ++ /* Clear PME_Status by writing 1 to it */ ++ pmcsr |= PCI_PM_CTRL_PME_STATUS; ++ ++ if (enable) ++ pmcsr |= PCI_PM_CTRL_PME_ENABLE; ++ else ++ pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; ++ ++ pci_write_config_word(dev, pm_cap + PCI_PM_CTRL, pmcsr); ++ ++ return 0; ++} ++#endif /* BCM_HAS_PCI_ENABLE_WAKE */ ++ ++#ifndef BCM_HAS_PCI_WAKE_FROM_D3 ++#ifndef BCM_HAS_PCI_PME_CAPABLE ++static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) ++{ ++ int pm_cap; ++ u16 caps; ++ bool ret = false; ++ ++ pm_cap = pci_find_capability(dev, PCI_CAP_ID_PM); ++ if (pm_cap == 0) ++ goto done; ++ ++ pci_read_config_word(dev, pm_cap + PCI_PM_PMC, &caps); ++ ++ if (state == PCI_D3cold && ++ (caps & PCI_PM_CAP_PME_D3cold)) ++ ret = true; ++ ++done: ++ return ret; ++} ++#endif /* BCM_HAS_PCI_PME_CAPABLE */ ++ ++static int pci_wake_from_d3(struct pci_dev *dev, bool enable) ++{ ++ return pci_pme_capable(dev, PCI_D3cold) ? ++ pci_enable_wake(dev, PCI_D3cold, enable) : ++ pci_enable_wake(dev, PCI_D3hot, enable); ++} ++#endif /* BCM_HAS_PCI_WAKE_FROM_D3 */ ++ ++#ifndef BCM_HAS_PCI_SET_POWER_STATE ++static int pci_set_power_state(struct pci_dev *dev, pci_power_t state) ++{ ++ int pm_cap; ++ u16 pmcsr; ++ ++ if (state < PCI_D0 || state > PCI_D3hot) ++ return -EINVAL; ++ ++ pm_cap = pci_find_capability(dev, PCI_CAP_ID_PM); ++ if (pm_cap == 0) ++ return -EIO; ++ ++ pci_read_config_word(dev, pm_cap + PCI_PM_CTRL, &pmcsr); ++ ++ pmcsr &= ~(PCI_PM_CTRL_STATE_MASK); ++ pmcsr |= state; ++ ++ pci_write_config_word(dev, pm_cap + PCI_PM_CTRL, pmcsr); ++ ++ msleep(10); ++ ++ return 0; ++} ++#endif /* BCM_HAS_PCI_SET_POWER_STATE */ ++ ++#ifdef __VMKLNX__ ++/* VMWare disables CONFIG_PM in their kernel configs. ++ * This renders WOL inop, because device_may_wakeup() always returns false. 
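++ *
++ * The fallback macros below therefore stub out the wakeup API and make
++ * device_may_wakeup() report true unconditionally, so that WOL
++ * configuration is still honored on such kernels.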
++ */ ++#undef BCM_HAS_DEVICE_WAKEUP_API ++#endif ++ ++#ifndef BCM_HAS_DEVICE_WAKEUP_API ++#undef device_init_wakeup ++#define device_init_wakeup(dev, val) ++#undef device_can_wakeup ++#define device_can_wakeup(dev) 1 ++#undef device_set_wakeup_enable ++#define device_set_wakeup_enable(dev, val) ++#undef device_may_wakeup ++#define device_may_wakeup(dev) 1 ++#endif /* BCM_HAS_DEVICE_WAKEUP_API */ ++ ++#ifndef BCM_HAS_DEVICE_SET_WAKEUP_CAPABLE ++#define device_set_wakeup_capable(dev, val) ++#endif /* BCM_HAS_DEVICE_SET_WAKEUP_CAPABLE */ ++ ++ ++#ifndef PCI_X_CMD_READ_2K ++#define PCI_X_CMD_READ_2K 0x0008 ++#endif ++#ifndef PCI_CAP_ID_EXP ++#define PCI_CAP_ID_EXP 0x10 ++#endif ++#ifndef PCI_EXP_LNKCTL ++#define PCI_EXP_LNKCTL 16 ++#endif ++#ifndef PCI_EXP_LNKCTL_CLKREQ_EN ++#define PCI_EXP_LNKCTL_CLKREQ_EN 0x100 ++#endif ++ ++#ifndef PCI_EXP_DEVCTL_NOSNOOP_EN ++#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 ++#endif ++ ++#ifndef PCI_EXP_DEVCTL_RELAX_EN ++#define PCI_EXP_DEVCTL_RELAX_EN 0x0010 ++#endif ++ ++#ifndef PCI_EXP_DEVCTL_PAYLOAD ++#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 ++#endif ++ ++#ifndef PCI_EXP_DEVSTA ++#define PCI_EXP_DEVSTA 10 ++#define PCI_EXP_DEVSTA_CED 0x01 ++#define PCI_EXP_DEVSTA_NFED 0x02 ++#define PCI_EXP_DEVSTA_FED 0x04 ++#define PCI_EXP_DEVSTA_URD 0x08 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA ++#define PCI_EXP_LNKSTA 18 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_CLS ++#define PCI_EXP_LNKSTA_CLS 0x000f ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_CLS_2_5GB ++#define PCI_EXP_LNKSTA_CLS_2_5GB 0x01 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_CLS_5_0GB ++#define PCI_EXP_LNKSTA_CLS_5_0GB 0x02 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_NLW ++#define PCI_EXP_LNKSTA_NLW 0x03f0 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_NLW_SHIFT ++#define PCI_EXP_LNKSTA_NLW_SHIFT 4 ++#endif ++ ++#ifndef PCI_EXP_DEVCTL ++#define PCI_EXP_DEVCTL 8 ++#endif ++#ifndef PCI_EXP_DEVCTL_READRQ ++#define PCI_EXP_DEVCTL_READRQ 0x7000 ++#endif ++ ++#ifndef BCM_HAS_PCIE_GET_READRQ ++int pcie_get_readrq(struct pci_dev *dev) ++{ ++ int ret, cap; ++ u16 ctl; ++ ++ cap = pci_find_capability(dev, PCI_CAP_ID_EXP); ++ if (!cap) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); ++ if (!ret) ++ ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); ++ ++out: ++ return ret; ++} ++#endif /* BCM_HAS_PCIE_GET_READRQ */ ++ ++#ifndef BCM_HAS_PCIE_SET_READRQ ++static inline int pcie_set_readrq(struct pci_dev *dev, int rq) ++{ ++ int cap, err = -EINVAL; ++ u16 ctl, v; ++ ++ if (rq < 128 || rq > 4096 || (rq & (rq-1))) ++ goto out; ++ ++ v = (ffs(rq) - 8) << 12; ++ ++ cap = pci_find_capability(dev, PCI_CAP_ID_EXP); ++ if (!cap) ++ goto out; ++ ++ err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); ++ if (err) ++ goto out; ++ ++ if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) { ++ ctl &= ~PCI_EXP_DEVCTL_READRQ; ++ ctl |= v; ++ err = pci_write_config_dword(dev, cap + PCI_EXP_DEVCTL, ctl); ++ } ++ ++out: ++ return err; ++} ++#endif /* BCM_HAS_PCIE_SET_READRQ */ ++ ++#ifndef BCM_HAS_PCI_READ_VPD ++#if !defined(PCI_CAP_ID_VPD) ++#define PCI_CAP_ID_VPD 0x03 ++#endif ++#if !defined(PCI_VPD_ADDR) ++#define PCI_VPD_ADDR 2 ++#endif ++#if !defined(PCI_VPD_DATA) ++#define PCI_VPD_DATA 4 ++#endif ++static inline ssize_t ++pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, u8 *buf) ++{ ++ int i, vpd_cap; ++ ++ vpd_cap = pci_find_capability(dev, PCI_CAP_ID_VPD); ++ if (!vpd_cap) ++ return -ENODEV; ++ ++ for (i = 0; i < count; i += 4) { ++ u32 tmp, j = 0; ++ __le32 v; ++ u16 tmp16; ++ ++ pci_write_config_word(dev, vpd_cap + 
PCI_VPD_ADDR, i);
++ while (j++ < 100) {
++ pci_read_config_word(dev, vpd_cap +
++ PCI_VPD_ADDR, &tmp16);
++ if (tmp16 & 0x8000)
++ break;
++ msleep(1);
++ }
++ if (!(tmp16 & 0x8000))
++ break;
++
++ pci_read_config_dword(dev, vpd_cap + PCI_VPD_DATA, &tmp);
++ v = cpu_to_le32(tmp);
++ memcpy(&buf[i], &v, sizeof(v));
++ }
++
++ return i;
++}
++#endif /* BCM_HAS_PCI_READ_VPD */
++
++#ifndef PCI_VPD_RO_KEYWORD_CHKSUM
++#define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
++#endif
++
++#ifndef PCI_VPD_LRDT
++#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
++#define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT)
++
++/* Large Resource Data Type Tag Item Names */
++#define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
++#define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */
++#define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */
++
++#define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
++#define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
++#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
++
++/* Small Resource Data Type Tag Item Names */
++#define PCI_VPD_STIN_END 0x78 /* End */
++
++#define PCI_VPD_SRDT_END PCI_VPD_STIN_END
++
++#define PCI_VPD_SRDT_TIN_MASK 0x78
++#define PCI_VPD_SRDT_LEN_MASK 0x07
++
++#define PCI_VPD_LRDT_TAG_SIZE 3
++#define PCI_VPD_SRDT_TAG_SIZE 1
++
++#define PCI_VPD_INFO_FLD_HDR_SIZE 3
++
++#define PCI_VPD_RO_KEYWORD_PARTNO "PN"
++#define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
++#define PCI_VPD_RO_KEYWORD_VENDOR0 "V0"
++
++/**
++ * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
++ * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
++ *
++ * Returns the extracted Large Resource Data Type length.
++ */
++static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
++{
++ return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
++}
++
++/**
++ * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
++ * @srdt: Pointer to the beginning of the Small Resource Data Type tag
++ *
++ * Returns the extracted Small Resource Data Type length.
++ */
++static inline u8 pci_vpd_srdt_size(const u8 *srdt)
++{
++ return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
++}
++
++/**
++ * pci_vpd_info_field_size - Extracts the information field length
++ * @info_field: Pointer to the beginning of an information field header
++ *
++ * Returns the extracted information field length.
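++ *
++ * A minimal usage sketch for the VPD helpers in this block (the buffer
++ * and length names are illustrative only):
++ *
++ *	i = pci_vpd_find_tag(vpd, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
++ *	if (i >= 0) {
++ *		rosize = pci_vpd_lrdt_size(&vpd[i]);
++ *		i += PCI_VPD_LRDT_TAG_SIZE;
++ *		j = pci_vpd_find_info_keyword(vpd, i, rosize,
++ *					      PCI_VPD_RO_KEYWORD_PARTNO);
++ *		if (j >= 0)
++ *			len = pci_vpd_info_field_size(&vpd[j]);
++ *	}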
++ */ ++static inline u8 pci_vpd_info_field_size(const u8 *info_field) ++{ ++ return info_field[2]; ++} ++ ++static int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt) ++{ ++ int i; ++ ++ for (i = off; i < len; ) { ++ u8 val = buf[i]; ++ ++ if (val & PCI_VPD_LRDT) { ++ /* Don't return success of the tag isn't complete */ ++ if (i + PCI_VPD_LRDT_TAG_SIZE > len) ++ break; ++ ++ if (val == rdt) ++ return i; ++ ++ i += PCI_VPD_LRDT_TAG_SIZE + ++ pci_vpd_lrdt_size(&buf[i]); ++ } else { ++ u8 tag = val & ~PCI_VPD_SRDT_LEN_MASK; ++ ++ if (tag == rdt) ++ return i; ++ ++ if (tag == PCI_VPD_SRDT_END) ++ break; ++ ++ i += PCI_VPD_SRDT_TAG_SIZE + ++ pci_vpd_srdt_size(&buf[i]); ++ } ++ } ++ ++ return -ENOENT; ++} ++ ++static int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, ++ unsigned int len, const char *kw) ++{ ++ int i; ++ ++ for (i = off; i + PCI_VPD_INFO_FLD_HDR_SIZE <= off + len;) { ++ if (buf[i + 0] == kw[0] && ++ buf[i + 1] == kw[1]) ++ return i; ++ ++ i += PCI_VPD_INFO_FLD_HDR_SIZE + ++ pci_vpd_info_field_size(&buf[i]); ++ } ++ ++ return -ENOENT; ++} ++#endif ++ ++#ifndef BCM_HAS_INTX_MSI_WORKAROUND ++static inline void tg3_enable_intx(struct pci_dev *pdev) ++{ ++#if (LINUX_VERSION_CODE < 0x2060e) ++ u16 pci_command; ++ ++ pci_read_config_word(pdev, PCI_COMMAND, &pci_command); ++ if (pci_command & PCI_COMMAND_INTX_DISABLE) ++ pci_write_config_word(pdev, PCI_COMMAND, ++ pci_command & ~PCI_COMMAND_INTX_DISABLE); ++#else ++ pci_intx(pdev, 1); ++#endif ++} ++#endif /* BCM_HAS_INTX_MSI_WORKAROUND */ ++ ++ ++#if (LINUX_VERSION_CODE >= 0x20613) || \ ++ (defined(__VMKLNX__) && defined(__USE_COMPAT_LAYER_2_6_18_PLUS__)) ++#define BCM_HAS_NEW_IRQ_SIG ++#endif ++ ++#if defined(INIT_DELAYED_WORK_DEFERRABLE) || \ ++ defined(INIT_DEFERRABLE_WORK) || \ ++ defined(INIT_WORK_NAR) || \ ++ (defined(__VMKLNX__) && defined(__USE_COMPAT_LAYER_2_6_18_PLUS__)) ++#define BCM_HAS_NEW_INIT_WORK ++#endif ++ ++#ifndef ETH_FCS_LEN ++#define ETH_FCS_LEN 4 ++#endif ++ ++#ifndef BCM_HAS_PRINT_MAC ++ ++#ifndef DECLARE_MAC_BUF ++#define DECLARE_MAC_BUF(_mac) char _mac[18] ++#endif ++ ++#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" ++ ++static char *print_mac(char * buf, const u8 *addr) ++{ ++ sprintf(buf, MAC_FMT, ++ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); ++ return buf; ++} ++#endif ++ ++ ++#ifndef NET_IP_ALIGN ++#define NET_IP_ALIGN 2 ++#endif ++ ++ ++#if !defined(BCM_HAS_ETHTOOL_OP_SET_TX_IPV6_CSUM) && \ ++ !defined(BCM_HAS_ETHTOOL_OP_SET_TX_HW_CSUM) && \ ++ defined(BCM_HAS_SET_TX_CSUM) ++static int tg3_set_tx_hw_csum(struct net_device *dev, u32 data) ++{ ++ if (data) ++ dev->features |= NETIF_F_HW_CSUM; ++ else ++ dev->features &= ~NETIF_F_HW_CSUM; ++ ++ return 0; ++} ++#endif ++ ++#ifndef NETDEV_TX_OK ++#define NETDEV_TX_OK 0 ++#endif ++ ++#ifndef NETDEV_TX_BUSY ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,32) ++#define NETDEV_TX_BUSY 0x1 ++#else ++#define NETDEV_TX_BUSY 0x10 ++#endif ++#endif ++ ++#ifndef NETDEV_TX_LOCKED ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,32) ++#define NETDEV_TX_LOCKED -1 ++#else ++#define NETDEV_TX_LOCKED 0x20 ++#endif ++#endif ++ ++#ifndef CHECKSUM_PARTIAL ++#define CHECKSUM_PARTIAL CHECKSUM_HW ++#endif ++ ++#ifndef NETIF_F_IPV6_CSUM ++#define NETIF_F_IPV6_CSUM 16 ++#define BCM_NO_IPV6_CSUM 1 ++#endif ++ ++#ifndef NETIF_F_RXCSUM ++#define NETIF_F_RXCSUM (1 << 29) ++#endif ++ ++#ifndef NETIF_F_GRO ++#define NETIF_F_GRO 16384 ++#endif ++ ++#ifndef NETIF_F_LOOPBACK ++#define NETIF_F_LOOPBACK (1 << 31) ++#endif ++ ++#ifdef NETIF_F_TSO 
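++/* When the kernel predates NETIF_F_GSO, the compat block below
++ * backports a minimal software segmentation path (skb_segment(),
++ * tcp_tso_segment(), inet_gso_segment() and skb_gso_segment()) so the
++ * driver can split an oversized TSO frame itself when the hardware TSO
++ * path cannot be used for a given skb.
++ */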
++#ifndef NETIF_F_GSO ++#define gso_size tso_size ++#define gso_segs tso_segs ++#endif ++#ifndef NETIF_F_TSO6 ++#define NETIF_F_TSO6 0 ++#define BCM_NO_TSO6 1 ++#endif ++#ifndef NETIF_F_TSO_ECN ++#define NETIF_F_TSO_ECN 0 ++#endif ++ ++#ifndef NETIF_F_ALL_TSO ++#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) ++#endif ++ ++#ifndef BCM_HAS_SKB_TX_TIMESTAMP ++#define skb_tx_timestamp(skb) ++#endif ++ ++#ifdef BCM_HAS_SKB_SHARED_TX_UNION ++#define tx_flags tx_flags.flags ++ ++/* Definitions for tx_flags in struct skb_shared_info */ ++enum { ++ /* generate hardware time stamp */ ++ SKBTX_HW_TSTAMP = 1 << 0, ++ ++ /* device driver is going to provide hardware time stamp */ ++ SKBTX_IN_PROGRESS = 1 << 2, ++}; ++#endif ++ ++#ifndef BCM_HAS_SKB_FRAG_SIZE ++#define skb_frag_size(skb_frag) ((skb_frag)->size) ++#endif ++ ++#if (LINUX_VERSION_CODE < 0x2060c) ++static inline int skb_header_cloned(struct sk_buff *skb) { return 0; } ++#endif ++ ++#ifndef BCM_HAS_SKB_TRANSPORT_OFFSET ++static inline int skb_transport_offset(const struct sk_buff *skb) ++{ ++ return (int) (skb->h.raw - skb->data); ++} ++#endif ++ ++#ifndef BCM_HAS_IP_HDR ++static inline struct iphdr *ip_hdr(const struct sk_buff *skb) ++{ ++ return skb->nh.iph; ++} ++#endif ++ ++#ifndef BCM_HAS_IP_HDRLEN ++static inline unsigned int ip_hdrlen(const struct sk_buff *skb) ++{ ++ return ip_hdr(skb)->ihl * 4; ++} ++#endif ++ ++#ifndef BCM_HAS_TCP_HDR ++static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb) ++{ ++ return skb->h.th; ++} ++#endif ++ ++#ifndef BCM_HAS_TCP_HDRLEN ++static inline unsigned int tcp_hdrlen(const struct sk_buff *skb) ++{ ++ return tcp_hdr(skb)->doff * 4; ++} ++#endif ++ ++#ifndef BCM_HAS_TCP_OPTLEN ++static inline unsigned int tcp_optlen(const struct sk_buff *skb) ++{ ++ return (tcp_hdr(skb)->doff - 5) * 4; ++} ++#endif ++ ++#ifndef NETIF_F_GSO ++static struct sk_buff *skb_segment(struct sk_buff *skb, int features) ++{ ++ struct sk_buff *segs = NULL; ++ struct sk_buff *tail = NULL; ++ unsigned int mss = skb_shinfo(skb)->gso_size; ++ unsigned int doffset = skb->data - skb->mac.raw; ++ unsigned int offset = doffset; ++ unsigned int headroom; ++ unsigned int len; ++ int nfrags = skb_shinfo(skb)->nr_frags; ++ int err = -ENOMEM; ++ int i = 0; ++ int pos; ++ ++ __skb_push(skb, doffset); ++ headroom = skb_headroom(skb); ++ pos = skb_headlen(skb); ++ ++ do { ++ struct sk_buff *nskb; ++ skb_frag_t *frag; ++ int hsize; ++ int k; ++ int size; ++ ++ len = skb->len - offset; ++ if (len > mss) ++ len = mss; ++ ++ hsize = skb_headlen(skb) - offset; ++ if (hsize < 0) ++ hsize = 0; ++ if (hsize > len) ++ hsize = len; ++ ++ nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC); ++ if (unlikely(!nskb)) ++ goto err; ++ ++ if (segs) ++ tail->next = nskb; ++ else ++ segs = nskb; ++ tail = nskb; ++ ++ nskb->dev = skb->dev; ++ nskb->priority = skb->priority; ++ nskb->protocol = skb->protocol; ++ nskb->dst = dst_clone(skb->dst); ++ memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); ++ nskb->pkt_type = skb->pkt_type; ++ nskb->mac_len = skb->mac_len; ++ ++ skb_reserve(nskb, headroom); ++ nskb->mac.raw = nskb->data; ++ nskb->nh.raw = nskb->data + skb->mac_len; ++ nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw); ++ memcpy(skb_put(nskb, doffset), skb->data, doffset); ++ ++ frag = skb_shinfo(nskb)->frags; ++ k = 0; ++ ++ nskb->ip_summed = CHECKSUM_PARTIAL; ++ nskb->csum = skb->csum; ++ memcpy(skb_put(nskb, hsize), skb->data + offset, hsize); ++ ++ while (pos < offset + len) { ++ BUG_ON(i >= nfrags); ++ ++ *frag = 
skb_shinfo(skb)->frags[i];
++			get_page(frag->page);
++			size = frag->size;
++
++			if (pos < offset) {
++				frag->page_offset += offset - pos;
++				frag->size -= offset - pos;
++			}
++
++			k++;
++
++			if (pos + size <= offset + len) {
++				i++;
++				pos += size;
++			} else {
++				frag->size -= pos + size - (offset + len);
++				break;
++			}
++
++			frag++;
++		}
++
++		skb_shinfo(nskb)->nr_frags = k;
++		nskb->data_len = len - hsize;
++		nskb->len += nskb->data_len;
++		nskb->truesize += nskb->data_len;
++	} while ((offset += len) < skb->len);
++
++	return segs;
++
++err:
++	while ((skb = segs)) {
++		segs = skb->next;
++		kfree_skb(skb);
++	}
++	return ERR_PTR(err);
++}
++
++static struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
++{
++	struct sk_buff *segs = ERR_PTR(-EINVAL);
++	struct tcphdr *th;
++	unsigned thlen;
++	unsigned int seq;
++	u32 delta;
++	unsigned int oldlen;
++	unsigned int len;
++
++	if (!pskb_may_pull(skb, sizeof(*th)))
++		goto out;
++
++	th = skb->h.th;
++	thlen = th->doff * 4;
++	if (thlen < sizeof(*th))
++		goto out;
++
++	if (!pskb_may_pull(skb, thlen))
++		goto out;
++
++	oldlen = (u16)~skb->len;
++	__skb_pull(skb, thlen);
++
++	segs = skb_segment(skb, features);
++	if (IS_ERR(segs))
++		goto out;
++
++	len = skb_shinfo(skb)->gso_size;
++	delta = htonl(oldlen + (thlen + len));
++
++	skb = segs;
++	th = skb->h.th;
++	seq = ntohl(th->seq);
++
++	do {
++		th->fin = th->psh = 0;
++
++		th->check = ~csum_fold((u32)((u32)th->check +
++				       (u32)delta));
++		seq += len;
++		skb = skb->next;
++		th = skb->h.th;
++
++		th->seq = htonl(seq);
++		th->cwr = 0;
++	} while (skb->next);
++
++	delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
++	th->check = ~csum_fold((u32)((u32)th->check +
++			       (u32)delta));
++out:
++	return segs;
++}
++
++static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
++{
++	struct sk_buff *segs = ERR_PTR(-EINVAL);
++	struct iphdr *iph;
++	int ihl;
++	int id;
++
++	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
++		goto out;
++
++	iph = skb->nh.iph;
++	ihl = iph->ihl * 4;
++	if (ihl < sizeof(*iph))
++		goto out;
++
++	if (unlikely(!pskb_may_pull(skb, ihl)))
++		goto out;
++
++	skb->h.raw = __skb_pull(skb, ihl);
++	iph = skb->nh.iph;
++	id = ntohs(iph->id);
++	segs = ERR_PTR(-EPROTONOSUPPORT);
++
++	segs = tcp_tso_segment(skb, features);
++
++	if (!segs || IS_ERR(segs))
++		goto out;
++
++	skb = segs;
++	do {
++		iph = skb->nh.iph;
++		iph->id = htons(id++);
++		iph->tot_len = htons(skb->len - skb->mac_len);
++		iph->check = 0;
++		iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
++	} while ((skb = skb->next));
++
++out:
++	return segs;
++}
++
++static struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
++{
++	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
++
++	skb->mac.raw = skb->data;
++	skb->mac_len = skb->nh.raw - skb->data;
++	__skb_pull(skb, skb->mac_len);
++
++	segs = inet_gso_segment(skb, features);
++
++	__skb_push(skb, skb->data - skb->mac.raw);
++	return segs;
++}
++#endif /* NETIF_F_GSO */
++
++#endif /* NETIF_F_TSO */
++
++#ifndef BCM_HAS_SKB_COPY_FROM_LINEAR_DATA
++static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
++					     void *to,
++					     const unsigned int len)
++{
++	memcpy(to, skb->data, len);
++}
++#endif
++
++#if TG3_TSO_SUPPORT != 0
++#if defined(BCM_NO_TSO6)
++static inline int skb_is_gso_v6(const struct sk_buff *skb)
++{
++	return 0;
++}
++#else
++#if !defined(BCM_HAS_SKB_IS_GSO_V6)
++static inline int skb_is_gso_v6(const struct sk_buff *skb)
++{
++	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
++}
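++
++/* The TSO transmit path uses skb_is_gso_v6() to skip the IPv4-only
++ * header fixups for IPv6 LSO frames; roughly, as in mainline tg3:
++ *
++ *	if (!skb_is_gso_v6(skb)) {
++ *		iph->check = 0;
++ *		iph->tot_len = htons(mss + hdr_len);
++ *	}
++ */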
++#endif ++#endif ++#endif ++ ++#ifndef BCM_HAS_SKB_CHECKSUM_NONE_ASSERT ++static inline void skb_checksum_none_assert(struct sk_buff *skb) ++{ ++ skb->ip_summed = CHECKSUM_NONE; ++} ++#endif ++ ++#ifndef BCM_HAS_NETDEV_TX_T ++typedef int netdev_tx_t; ++#endif ++ ++#ifndef BCM_HAS_NETDEV_FEATURES_T ++typedef u32 netdev_features_t; ++#endif ++ ++#ifndef BCM_HAS_NETDEV_NAME ++#define netdev_name(netdev) netdev->name ++#endif ++ ++#if defined(netdev_printk) && (LINUX_VERSION_CODE < 0x020609) ++/* SLES 9.X provides their own print routines, but they are not compatible ++ * with the versions found in the latest upstream kernel. The kernel ++ * version check above was picked out of the air as a value greater than ++ * 2.6.5-7.308, but any number that preserves this boundary should be ++ * acceptable. ++ */ ++#undef netdev_printk ++#undef netdev_info ++#undef netdev_err ++#undef netdev_warn ++#endif ++ ++#ifndef netdev_printk ++#define netdev_printk(level, netdev, format, args...) \ ++ dev_printk(level, tp->pdev->dev.parent, \ ++ "%s: " format, \ ++ netdev_name(tp->dev), ##args) ++#endif ++ ++#ifndef netif_printk ++#define netif_printk(priv, type, level, dev, fmt, args...) \ ++do { \ ++ if (netif_msg_##type(priv)) \ ++ netdev_printk(level, (dev), fmt, ##args); \ ++} while (0) ++#endif ++ ++#ifndef netif_info ++#define netif_info(priv, type, dev, fmt, args...) \ ++ netif_printk(priv, type, KERN_INFO, (dev), fmt, ##args) ++#endif ++ ++#ifndef netdev_err ++#define netdev_err(dev, format, args...) \ ++ netdev_printk(KERN_ERR, dev, format, ##args) ++#endif ++ ++#ifndef netdev_warn ++#define netdev_warn(dev, format, args...) \ ++ netdev_printk(KERN_WARNING, dev, format, ##args) ++#endif ++ ++#ifndef netdev_notice ++#define netdev_notice(dev, format, args...) \ ++ netdev_printk(KERN_NOTICE, dev, format, ##args) ++#endif ++ ++#ifndef netdev_info ++#define netdev_info(dev, format, args...) 
\ ++ netdev_printk(KERN_INFO, dev, format, ##args) ++#endif ++ ++#ifndef BCM_HAS_NETIF_TX_LOCK ++static inline void netif_tx_lock(struct net_device *dev) ++{ ++ spin_lock(&dev->xmit_lock); ++ dev->xmit_lock_owner = smp_processor_id(); ++} ++ ++static inline void netif_tx_unlock(struct net_device *dev) ++{ ++ dev->xmit_lock_owner = -1; ++ spin_unlock(&dev->xmit_lock); ++} ++#endif /* BCM_HAS_NETIF_TX_LOCK */ ++ ++#if defined(BCM_HAS_STRUCT_NETDEV_QUEUE) || \ ++ (defined(__VMKLNX__) && defined(__USE_COMPAT_LAYER_2_6_18_PLUS__)) ++ ++#define TG3_NAPI ++#define napi_complete_(dev, napi) napi_complete((napi)) ++#define napi_schedule_(dev, napi) napi_schedule((napi)) ++#define tg3_netif_rx_schedule_prep(dev, napi) napi_schedule_prep((napi)) ++ ++#else /* BCM_HAS_STRUCT_NETDEV_QUEUE */ ++ ++#define netdev_queue net_device ++#define netdev_get_tx_queue(dev, i) (dev) ++#define netif_tx_start_queue(dev) netif_start_queue((dev)) ++#define netif_tx_start_all_queues(dev) netif_start_queue((dev)) ++#define netif_tx_stop_queue(dev) netif_stop_queue((dev)) ++#define netif_tx_stop_all_queues(dev) netif_stop_queue((dev)) ++#define netif_tx_queue_stopped(dev) netif_queue_stopped((dev)) ++#define netif_tx_wake_queue(dev) netif_wake_queue((dev)) ++#define netif_tx_wake_all_queues(dev) netif_wake_queue((dev)) ++#define __netif_tx_lock(txq, procid) netif_tx_lock((txq)) ++#define __netif_tx_unlock(txq) netif_tx_unlock((txq)) ++ ++#if defined(BCM_HAS_NEW_NETIF_INTERFACE) ++#define TG3_NAPI ++#define napi_complete_(dev, napi) netif_rx_complete((dev), (napi)) ++#define napi_schedule_(dev, napi) netif_rx_schedule((dev), (napi)) ++#define tg3_netif_rx_schedule_prep(dev, napi) netif_rx_schedule_prep((dev), (napi)) ++#else /* BCM_HAS_NEW_NETIF_INTERFACE */ ++#define napi_complete_(dev, napi) netif_rx_complete((dev)) ++#define napi_schedule_(dev, napi) netif_rx_schedule((dev)) ++#define tg3_netif_rx_schedule_prep(dev, napi) netif_rx_schedule_prep((dev)) ++#endif /* BCM_HAS_NEW_NETIF_INTERFACE */ ++ ++#endif /* BCM_HAS_STRUCT_NETDEV_QUEUE */ ++ ++#if !defined(BCM_HAS_ALLOC_ETHERDEV_MQ) || !defined(TG3_NAPI) ++#define alloc_etherdev_mq(size, numqs) alloc_etherdev((size)) ++#endif ++ ++#if !defined(TG3_NAPI) || !defined(BCM_HAS_VLAN_GRO_RECEIVE) ++#define vlan_gro_receive(nap, grp, tag, skb) \ ++ vlan_hwaccel_receive_skb((skb), (grp), (tag)) ++#endif ++ ++#ifndef NETIF_F_HW_VLAN_CTAG_TX ++#define NETIF_F_HW_VLAN_CTAG_TX NETIF_F_HW_VLAN_TX ++#else ++#define BCM_HWACCEL_HAS_PROTO_ARG ++#endif ++ ++#ifndef NETIF_F_HW_VLAN_CTAG_RX ++#define NETIF_F_HW_VLAN_CTAG_RX NETIF_F_HW_VLAN_RX ++#endif ++#if !defined(TG3_NAPI) || !defined(BCM_HAS_NAPI_GRO_RECEIVE) ++#define napi_gro_receive(nap, skb) \ ++ netif_receive_skb((skb)) ++#endif ++ ++#if !defined(BCM_HAS_SKB_GET_QUEUE_MAPPING) || !defined(TG3_NAPI) ++#define skb_get_queue_mapping(skb) 0 ++#endif ++ ++#ifdef TG3_NAPI ++#if (LINUX_VERSION_CODE < 0x02061b) && !defined(__VMKLNX__) ++ ++static inline void netif_napi_del(struct napi_struct *napi) ++{ ++#ifdef CONFIG_NETPOLL ++ list_del(&napi->dev_list); ++#endif ++} ++#endif ++ ++#endif ++#if (LINUX_VERSION_CODE < 0x020612) ++static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, ++ unsigned int length) ++{ ++ struct sk_buff *skb = dev_alloc_skb(length); ++ if (skb) ++ skb->dev = dev; ++ return skb; ++} ++#endif ++ ++#ifndef BCM_HAS_NETDEV_PRIV ++static inline void *netdev_priv(struct net_device *dev) ++{ ++ return dev->priv; ++} ++#endif ++ ++#ifdef OLD_NETIF ++static inline void netif_poll_disable(struct net_device 
*dev) ++{ ++ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) { ++ /* No hurry. */ ++ current->state = TASK_INTERRUPTIBLE; ++ schedule_timeout(1); ++ } ++} ++ ++static inline void netif_poll_enable(struct net_device *dev) ++{ ++ clear_bit(__LINK_STATE_RX_SCHED, &dev->state); ++} ++ ++static inline void netif_tx_disable(struct net_device *dev) ++{ ++ spin_lock_bh(&dev->xmit_lock); ++ netif_stop_queue(dev); ++ spin_unlock_bh(&dev->xmit_lock); ++} ++#endif /* OLD_NETIF */ ++ ++#ifndef BCM_HAS_NETDEV_SENT_QUEUE ++#define netdev_sent_queue(dev, bytes) ++#endif ++ ++#ifndef BCM_HAS_NETDEV_TX_SENT_QUEUE ++#define netdev_tx_sent_queue(q, bytes) \ ++ netdev_sent_queue(tp->dev, bytes) ++#endif ++ ++#ifndef BCM_HAS_NETDEV_COMPLETED_QUEUE ++#define netdev_completed_queue(dev, pkts, bytes) ++#endif ++ ++#ifndef BCM_HAS_NETDEV_TX_COMPLETED_QUEUE ++#define netdev_tx_completed_queue(q, pkt_cnt, byte_cnt) \ ++ netdev_completed_queue(tp->dev, pkt_cnt, byte_cnt) ++#endif ++ ++#ifndef BCM_HAS_NETDEV_RESET_QUEUE ++#define netdev_reset_queue(dev_queue) ++#endif ++ ++#ifndef BCM_HAS_NETDEV_TX_RESET_QUEUE ++#define netdev_tx_reset_queue(q) \ ++ netdev_reset_queue(tp->dev) ++#endif ++ ++#ifndef BCM_HAS_NETIF_SET_REAL_NUM_TX_QUEUES ++#define netif_set_real_num_tx_queues(dev, nq) ((dev)->real_num_tx_queues = (nq)) ++#endif ++ ++#ifndef BCM_HAS_NETIF_SET_REAL_NUM_RX_QUEUES ++#define netif_set_real_num_rx_queues(dev, nq) 0 ++#endif ++ ++#ifndef netdev_mc_count ++#define netdev_mc_count(dev) ((dev)->mc_count) ++#endif ++ ++#ifndef netdev_mc_empty ++#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) ++#endif ++ ++/* ++ * Commit ID 22bedad3ce112d5ca1eaf043d4990fa2ed698c87 is the patch that ++ * undefines dmi_addr and pivots the code to use netdev_hw_addr rather ++ * than dev_mc_list. Commit ID 6683ece36e3531fc8c75f69e7165c5f20930be88 ++ * is the patch that introduces netdev_for_each_mc_addr. Commit ID ++ * f001fde5eadd915f4858d22ed70d7040f48767cf is the patch that introduces ++ * netdev_hw_addr. These features are presented in reverse chronological ++ * order. 
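++ *
++ * In short: when netdev_hw_addr exists but dmi_addr is still defined,
++ * or when netdev_hw_addr is missing entirely, the replacement
++ * netdev_for_each_mc_addr() below walks the legacy dev->mc_list and
++ * surfaces each dmi_addr through a stand-in netdev_hw_addr, so the
++ * multicast filter code can be written against the modern interface.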
++ */ ++#ifdef BCM_HAS_NETDEV_HW_ADDR ++#ifdef dmi_addr ++#undef netdev_for_each_mc_addr ++#define netdev_for_each_mc_addr(ha, dev) \ ++ struct dev_mc_list * oldmclist; \ ++ struct netdev_hw_addr foo; \ ++ ha = &foo; \ ++ for (oldmclist = dev->mc_list; oldmclist && memcpy(foo.addr, oldmclist->dmi_addr, 6); oldmclist = oldmclist->next) ++#endif ++#else /* BCM_HAS_NETDEV_HW_ADDR */ ++struct netdev_hw_addr { ++ u8 * addr; ++ struct dev_mc_list * curr; ++}; ++#undef netdev_for_each_mc_addr ++#define netdev_for_each_mc_addr(ha, dev) \ ++ struct netdev_hw_addr mclist; \ ++ ha = &mclist; \ ++ for (mclist.curr = dev->mc_list; mclist.curr && (mclist.addr = &mclist.curr->dmi_addr[0]); mclist.curr = mclist.curr->next) ++#endif /* BCM_HAS_NETDEV_HW_ADDR */ ++ ++#ifndef BCM_HAS_GET_STATS64 ++#define rtnl_link_stats64 net_device_stats ++#endif /* BCM_HAS_GET_STATS64 */ ++ ++#ifndef BCM_HAS_EXTERNAL_LB_DONE ++#define ETH_TEST_FL_EXTERNAL_LB (1 << 2) ++#define ETH_TEST_FL_EXTERNAL_LB_DONE (1 << 3) ++#endif ++ ++#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) ++#define BCM_KERNEL_SUPPORTS_8021Q ++#endif ++ ++#ifndef ETH_SS_TEST ++#define ETH_SS_TEST 0 ++#endif ++#ifndef ETH_SS_STATS ++#define ETH_SS_STATS 1 ++#endif ++#ifndef ADVERTISED_Pause ++#define ADVERTISED_Pause (1 << 13) ++#endif ++#ifndef ADVERTISED_Asym_Pause ++#define ADVERTISED_Asym_Pause (1 << 14) ++#endif ++ ++#ifndef ADVERTISED_1000baseKX_Full ++#define ADVERTISED_1000baseKX_Full (1 << 17) ++#endif ++ ++#ifndef MII_CTRL1000 ++#define MII_CTRL1000 0x09 ++#endif ++#ifndef ADVERTISE_1000HALF ++#define ADVERTISE_1000HALF 0x0100 ++#endif ++#ifndef ADVERTISE_1000FULL ++#define ADVERTISE_1000FULL 0x0200 ++#endif ++#ifndef CTL1000_AS_MASTER ++#define CTL1000_AS_MASTER 0x0800 ++#endif ++#ifndef CTL1000_ENABLE_MASTER ++#define CTL1000_ENABLE_MASTER 0x1000 ++#endif ++#ifndef MII_STAT1000 ++#define MII_STAT1000 0x0a ++#endif ++#ifndef BMCR_SPEED1000 ++#define BMCR_SPEED1000 0x0040 ++#endif ++#ifndef ADVERTISE_1000XFULL ++#define ADVERTISE_1000XFULL 0x0020 ++#endif ++#ifndef ADVERTISE_1000XHALF ++#define ADVERTISE_1000XHALF 0x0040 ++#endif ++#ifndef ADVERTISE_1000XPAUSE ++#define ADVERTISE_1000XPAUSE 0x0080 ++#endif ++#ifndef ADVERTISE_1000XPSE_ASYM ++#define ADVERTISE_1000XPSE_ASYM 0x0100 ++#endif ++#ifndef ADVERTISE_PAUSE ++#define ADVERTISE_PAUSE_CAP 0x0400 ++#endif ++#ifndef ADVERTISE_PAUSE_ASYM ++#define ADVERTISE_PAUSE_ASYM 0x0800 ++#endif ++#ifndef LPA_1000XFULL ++#define LPA_1000XFULL 0x0020 ++#endif ++#ifndef LPA_1000XHALF ++#define LPA_1000XHALF 0x0040 ++#endif ++#ifndef LPA_1000XPAUSE ++#define LPA_1000XPAUSE 0x0080 ++#endif ++#ifndef LPA_1000XPAUSE_ASYM ++#define LPA_1000XPAUSE_ASYM 0x0100 ++#endif ++#ifndef LPA_PAUSE ++#define LPA_PAUSE_CAP 0x0400 ++#endif ++#ifndef LPA_PAUSE_ASYM ++#define LPA_PAUSE_ASYM 0x0800 ++#endif ++#ifndef LPA_1000FULL ++#define LPA_1000FULL 0x0800 ++#endif ++#ifndef LPA_1000HALF ++#define LPA_1000HALF 0x0400 ++#endif ++ ++#ifndef ETHTOOL_FWVERS_LEN ++#define ETHTOOL_FWVERS_LEN 32 ++#endif ++ ++#ifndef MDIO_MMD_AN ++#define MDIO_MMD_AN 7 ++#endif ++ ++#ifndef MDIO_AN_EEE_ADV ++#define MDIO_AN_EEE_ADV 60 ++#endif ++ ++#ifndef MDIO_AN_EEE_ADV_100TX ++#define MDIO_AN_EEE_ADV_100TX 0x0002 ++#endif ++ ++#ifndef MDIO_AN_EEE_ADV_1000T ++#define MDIO_AN_EEE_ADV_1000T 0x0004 ++#endif ++ ++#ifndef MDIO_AN_EEE_LPABLE ++#define MDIO_AN_EEE_LPABLE 61 ++#endif ++ ++#ifndef MDIO_EEE_100TX ++#define MDIO_EEE_100TX MDIO_AN_EEE_ADV_100TX /* 100TX EEE cap */ ++#endif ++ ++#ifndef MDIO_EEE_1000T ++#define 
MDIO_EEE_1000T MDIO_AN_EEE_ADV_1000T /* 1000T EEE cap */ ++#endif ++ ++#ifndef MDIO_EEE_1000KX ++#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ ++#endif ++ ++#ifndef BCM_HAS_MMD_EEE_ADV_TO_ETHTOOL ++/** ++ * mmd_eee_adv_to_ethtool_adv_t ++ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers ++ * ++ * A small helper function that translates the MMD EEE Advertisment (7.60) ++ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement ++ * settings. ++ */ ++static inline u32 mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) ++{ ++ u32 adv = 0; ++ ++ if (eee_adv & MDIO_EEE_100TX) ++ adv |= ADVERTISED_100baseT_Full; ++ if (eee_adv & MDIO_EEE_1000T) ++ adv |= ADVERTISED_1000baseT_Full; ++ if (eee_adv & MDIO_EEE_1000KX) ++ adv |= ADVERTISED_1000baseKX_Full; ++ ++ return adv; ++} ++#endif ++ ++#ifndef SPEED_UNKNOWN ++#define SPEED_UNKNOWN -1 ++#endif ++ ++#ifndef DUPLEX_UNKNOWN ++#define DUPLEX_UNKNOWN 0xff ++#endif ++ ++#ifndef BCM_HAS_ETHTOOL_ADV_TO_MII_ADV_T ++static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv) ++{ ++ u32 result = 0; ++ ++ if (ethadv & ADVERTISED_10baseT_Half) ++ result |= ADVERTISE_10HALF; ++ if (ethadv & ADVERTISED_10baseT_Full) ++ result |= ADVERTISE_10FULL; ++ if (ethadv & ADVERTISED_100baseT_Half) ++ result |= ADVERTISE_100HALF; ++ if (ethadv & ADVERTISED_100baseT_Full) ++ result |= ADVERTISE_100FULL; ++ if (ethadv & ADVERTISED_Pause) ++ result |= ADVERTISE_PAUSE_CAP; ++ if (ethadv & ADVERTISED_Asym_Pause) ++ result |= ADVERTISE_PAUSE_ASYM; ++ ++ return result; ++} ++ ++static inline u32 mii_adv_to_ethtool_adv_t(u32 adv) ++{ ++ u32 result = 0; ++ ++ if (adv & ADVERTISE_10HALF) ++ result |= ADVERTISED_10baseT_Half; ++ if (adv & ADVERTISE_10FULL) ++ result |= ADVERTISED_10baseT_Full; ++ if (adv & ADVERTISE_100HALF) ++ result |= ADVERTISED_100baseT_Half; ++ if (adv & ADVERTISE_100FULL) ++ result |= ADVERTISED_100baseT_Full; ++ if (adv & ADVERTISE_PAUSE_CAP) ++ result |= ADVERTISED_Pause; ++ if (adv & ADVERTISE_PAUSE_ASYM) ++ result |= ADVERTISED_Asym_Pause; ++ ++ return result; ++} ++ ++static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) ++{ ++ u32 result = 0; ++ ++ if (ethadv & ADVERTISED_1000baseT_Half) ++ result |= ADVERTISE_1000HALF; ++ if (ethadv & ADVERTISED_1000baseT_Full) ++ result |= ADVERTISE_1000FULL; ++ ++ return result; ++} ++ ++static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv) ++{ ++ u32 result = 0; ++ ++ if (adv & ADVERTISE_1000HALF) ++ result |= ADVERTISED_1000baseT_Half; ++ if (adv & ADVERTISE_1000FULL) ++ result |= ADVERTISED_1000baseT_Full; ++ ++ return result; ++} ++ ++static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa) ++{ ++ u32 result = 0; ++ ++ if (lpa & LPA_LPACK) ++ result |= ADVERTISED_Autoneg; ++ ++ return result | mii_adv_to_ethtool_adv_t(lpa); ++} ++ ++static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa) ++{ ++ u32 result = 0; ++ ++ if (lpa & LPA_1000HALF) ++ result |= ADVERTISED_1000baseT_Half; ++ if (lpa & LPA_1000FULL) ++ result |= ADVERTISED_1000baseT_Full; ++ ++ return result; ++} ++ ++static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv) ++{ ++ u32 result = 0; ++ ++ if (ethadv & ADVERTISED_1000baseT_Half) ++ result |= ADVERTISE_1000XHALF; ++ if (ethadv & ADVERTISED_1000baseT_Full) ++ result |= ADVERTISE_1000XFULL; ++ if (ethadv & ADVERTISED_Pause) ++ result |= ADVERTISE_1000XPAUSE; ++ if (ethadv & ADVERTISED_Asym_Pause) ++ result |= ADVERTISE_1000XPSE_ASYM; ++ ++ return result; ++} ++ ++static inline u32 mii_adv_to_ethtool_adv_x(u32 adv) ++{ ++ u32 result = 0; ++ ++ if (adv & 
ADVERTISE_1000XHALF) ++ result |= ADVERTISED_1000baseT_Half; ++ if (adv & ADVERTISE_1000XFULL) ++ result |= ADVERTISED_1000baseT_Full; ++ if (adv & ADVERTISE_1000XPAUSE) ++ result |= ADVERTISED_Pause; ++ if (adv & ADVERTISE_1000XPSE_ASYM) ++ result |= ADVERTISED_Asym_Pause; ++ ++ return result; ++} ++ ++static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa) ++{ ++ u32 result = 0; ++ ++ if (lpa & LPA_LPACK) ++ result |= ADVERTISED_Autoneg; ++ ++ return result | mii_adv_to_ethtool_adv_x(lpa); ++} ++#endif /* BCM_HAS_ETHTOOL_ADV_TO_MII_100BT */ ++ ++#ifndef BCM_HAS_ETHTOOL_RXFH_INDIR_DEFAULT ++static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) ++{ ++ return index % n_rx_rings; ++} ++#endif /* BCM_HAS_ETHTOOL_RXFH_INDIR_DEFAULT */ ++ ++#ifndef BCM_HAS_MII_RESOLVE_FLOWCTRL_FDX ++#ifndef FLOW_CTRL_TX ++#define FLOW_CTRL_TX 0x01 ++#endif ++#ifndef FLOW_CTRL_RX ++#define FLOW_CTRL_RX 0x02 ++#endif ++static u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv) ++{ ++ u8 cap = 0; ++ ++ if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) { ++ cap = FLOW_CTRL_TX | FLOW_CTRL_RX; ++ } else if (lcladv & ADVERTISE_PAUSE_ASYM) { ++ if (lcladv & LPA_PAUSE_CAP) ++ cap = FLOW_CTRL_RX; ++ if (rmtadv & LPA_PAUSE_CAP) ++ cap = FLOW_CTRL_TX; ++ } ++ ++ return cap; ++} ++#endif /* BCM_HAS_MII_RESOLVE_FLOWCTRL_FDX */ ++ ++#ifndef BCM_HAS_MII_ADVERTISE_FLOWCTRL ++static u16 mii_advertise_flowctrl(u8 flow_ctrl) ++{ ++ u16 miireg; ++ ++ if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) ++ miireg = ADVERTISE_PAUSE_CAP; ++ else if (flow_ctrl & FLOW_CTRL_TX) ++ miireg = ADVERTISE_PAUSE_ASYM; ++ else if (flow_ctrl & FLOW_CTRL_RX) ++ miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; ++ else ++ miireg = 0; ++ ++ return miireg; ++} ++#endif /* BCM_HAS_MII_ADVERTISE_FLOWCTRL */ ++ ++#ifdef BCM_INCLUDE_PHYLIB_SUPPORT ++ ++#ifndef PHY_ID_BCM50610 ++#define PHY_ID_BCM50610 0x0143bd60 ++#endif ++#ifndef PHY_ID_BCM50610M ++#define PHY_ID_BCM50610M 0x0143bd70 ++#endif ++#ifndef PHY_ID_BCM50612E ++#define PHY_ID_BCM50612E 0x03625e20 ++#endif ++#ifndef PHY_ID_BCMAC131 ++#define PHY_ID_BCMAC131 0x0143bc70 ++#endif ++#ifndef PHY_ID_BCM57780 ++#define PHY_ID_BCM57780 0x03625d90 ++#endif ++#ifndef PHY_BCM_OUI_MASK ++#define PHY_BCM_OUI_MASK 0xfffffc00 ++#endif ++#ifndef PHY_BCM_OUI_1 ++#define PHY_BCM_OUI_1 0x00206000 ++#endif ++#ifndef PHY_BCM_OUI_2 ++#define PHY_BCM_OUI_2 0x0143bc00 ++#endif ++#ifndef PHY_BCM_OUI_3 ++#define PHY_BCM_OUI_3 0x03625c00 ++#endif ++ ++#ifndef PHY_BRCM_STD_IBND_DISABLE ++#define PHY_BRCM_STD_IBND_DISABLE 0x00000800 ++#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000 ++#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000 ++#endif ++ ++#ifndef PHY_BRCM_RX_REFCLK_UNUSED ++#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400 ++#endif ++ ++#ifndef PHY_BRCM_CLEAR_RGMII_MODE ++#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 ++#endif ++ ++#ifndef PHY_BRCM_DIS_TXCRXC_NOENRGY ++#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 ++#endif ++ ++#ifndef BCM_HAS_MDIOBUS_ALLOC ++static struct mii_bus *mdiobus_alloc(void) ++{ ++ struct mii_bus *bus; ++ ++ bus = kzalloc(sizeof(*bus), GFP_KERNEL); ++ ++ return bus; ++} ++ ++void mdiobus_free(struct mii_bus *bus) ++{ ++ kfree(bus); ++} ++#endif ++ ++#endif /* BCM_INCLUDE_PHYLIB_SUPPORT */ ++ ++#ifndef BCM_HAS_ETHTOOL_CMD_SPEED ++static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep) ++{ ++ return ep->speed; ++} ++#endif /* BCM_HAS_ETHTOOL_CMD_SPEED */ ++ ++#ifndef BCM_HAS_ETHTOOL_CMD_SPEED_SET ++static inline __u32 ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed) 
++{ ++ ep->speed = speed; ++ return 0; ++} ++#endif /* BCM_HAS_ETHTOOL_CMD_SPEED_SET */ ++ ++#ifdef BCM_HAS_PCI_BUSN_RES ++#define busn_res_end busn_res.end ++#else ++#define busn_res_end subordinate ++#endif ++ ++#ifndef __devinit ++#define __devinit ++#endif ++ ++#ifndef __devinitdata ++#define __devinitdata ++#endif ++ ++#ifndef __devexit ++#define __devexit ++#endif ++ ++#ifndef __devexit_p ++#define __devexit_p(x) (x) ++#endif ++ ++#ifndef CONFIG_SSB_DRIVER_GIGE ++#define ssb_gige_get_macaddr(a, b) (0) ++#define ssb_gige_get_phyaddr(a) (0) ++#define pdev_is_ssb_gige_core(a) (0) ++#define ssb_gige_must_flush_posted_writes(a) (0) ++#define ssb_gige_one_dma_at_once(a) (0) ++#define ssb_gige_have_roboswitch(a) (0) ++#define ssb_gige_is_rgmii(a) (0) ++#else ++#include ++#endif ++ ++#ifndef ETHTOOL_GEEE ++struct ethtool_eee { ++ __u32 cmd; ++ __u32 supported; ++ __u32 advertised; ++ __u32 lp_advertised; ++ __u32 eee_active; ++ __u32 eee_enabled; ++ __u32 tx_lpi_enabled; ++ __u32 tx_lpi_timer; ++ __u32 reserved[2]; ++}; ++#endif ++ ++#ifdef __VMKLNX__ ++#ifndef SYSTEM_POWER_OFF ++#define SYSTEM_POWER_OFF (3) ++#endif ++ ++#define system_state SYSTEM_POWER_OFF ++#endif ++ ++#ifndef BCM_HAS_PCI_CHANNEL_OFFLINE ++static inline int pci_channel_offline(struct pci_dev *pdev) ++{ ++#ifdef BCM_HAS_PCI_CHANNEL_IO_NORMAL_ENUM ++ return (pdev->error_state != pci_channel_io_normal); ++#else ++ return 0; ++#endif ++} ++#endif /*BCM_HAS_PCI_CHANNEL_OFFLINE*/ ++ ++#ifndef BCM_HAS_PCI_IS_ENABLED ++static inline int pci_is_enabled(struct pci_dev *pdev) ++{ ++ return 1; ++} ++#endif ++ ++#ifndef BCM_HAS_PCI_DEV_IS_PRESENT ++static inline int pci_device_is_present(struct pci_dev *pdev) ++{ ++ return 1; ++} ++#endif ++#ifndef BCM_HAS_DMA_ZALLOC_COHERENT ++#ifndef __GFP_ZERO ++ #define ___GFP_ZERO 0x8000u ++ #define __GFP_ZERO ((__force unsigned)___GFP_ZERO) /* Return zeroed page on success */ ++#endif ++ ++static inline void *dma_zalloc_coherent(struct device *dev, size_t size, ++ dma_addr_t *dma_handle, unsigned flag) ++{ ++ void *ret = dma_alloc_coherent(dev, size, dma_handle, ++ flag | __GFP_ZERO); ++ return ret; ++} ++#endif ++ ++#ifndef DEFAULT_MAX_NUM_RSS_QUEUES ++#define DEFAULT_MAX_NUM_RSS_QUEUES (8) ++#endif ++ ++#ifndef BCM_HAS_GET_NUM_DFLT_RSS_QS ++int netif_get_num_default_rss_queues(void) ++{ ++ return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); ++} ++#endif ++ ++#ifndef SIOCGHWTSTAMP ++#define SIOCGHWTSTAMP 0x89b1 ++#endif +diff --git a/drivers/net/ethernet/broadcom/tg3/tg3_compat2.h b/drivers/net/ethernet/broadcom/tg3/tg3_compat2.h +new file mode 100644 +index 0000000..07c968d +--- /dev/null ++++ b/drivers/net/ethernet/broadcom/tg3/tg3_compat2.h +@@ -0,0 +1,518 @@ ++/* Copyright (C) 2009-2015 Broadcom Corporation. 
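++ *
++ * tg3_compat2.h collects the backport shims that dereference struct tg3
++ * itself (for instance, pci_pcie_cap() below reads tp->pcie_cap), so it
++ * can only be pulled in once tg3.h has defined that structure.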
*/
++
++#ifndef BCM_HAS_PCI_PCIE_CAP
++static inline int pci_pcie_cap(struct pci_dev *pdev)
++{
++	struct net_device *dev = pci_get_drvdata(pdev);
++	struct tg3 *tp = netdev_priv(dev);
++
++	return tp->pcie_cap;
++}
++#endif
++
++#ifndef BCM_HAS_PCI_IS_PCIE
++static inline bool pci_is_pcie(struct pci_dev *dev)
++{
++	return !!pci_pcie_cap(dev);
++}
++#endif
++
++#ifndef BCM_HAS_PCIE_CAP_RW
++static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
++					   u16 set)
++{
++	u16 val;
++	int rval;
++
++	rval = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, &val);
++
++	if (!rval) {
++		val |= set;
++		rval = pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
++	}
++
++	return rval;
++}
++
++static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
++					     u16 clear)
++{
++	u16 val;
++	int rval;
++
++	rval = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, &val);
++
++	if (!rval) {
++		val &= ~clear;
++		rval = pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
++	}
++
++	return rval;
++}
++
++static int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
++{
++	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
++}
++
++static int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
++{
++	return pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
++}
++#endif
++
++#ifndef BCM_HAS_SKB_FRAG_DMA_MAP
++#define skb_frag_dma_map(x, frag, y, len, z) \
++	pci_map_page(tp->pdev, (frag)->page, \
++		     (frag)->page_offset, (len), PCI_DMA_TODEVICE)
++#endif
++
++#ifdef SIMPLE_DEV_PM_OPS
++
++#define tg3_invalid_pci_state(tp, state)	false
++#define tg3_pci_save_state(tp)
++#define tg3_pci_restore_state(tp)
++
++#else /* SIMPLE_DEV_PM_OPS */
++
++#if (LINUX_VERSION_CODE < 0x2060b)
++static bool tg3_invalid_pci_state(struct tg3 *tp, u32 state)
++{
++	bool ret = true;
++	pci_power_t target_state;
++
++	target_state = pci_choose_state(tp->pdev, state);
++	if (target_state == PCI_D3hot || target_state == PCI_D3cold)
++		ret = false;
++
++	return ret;
++}
++#else
++static bool tg3_invalid_pci_state(struct tg3 *tp, pm_message_t state)
++{
++	bool ret = true;
++	pci_power_t target_state;
++
++#ifdef BCM_HAS_PCI_TARGET_STATE
++	target_state = tp->pdev->pm_cap ? pci_target_state(tp->pdev) : PCI_D3hot;
++#else
++	target_state = pci_choose_state(tp->pdev, state);
++#endif
++	if (target_state == PCI_D3hot || target_state == PCI_D3cold)
++		ret = false;
++
++	return ret;
++}
++#endif
++
++#if (LINUX_VERSION_CODE < 0x2060a)
++#define tg3_pci_save_state(tp)		pci_save_state(tp->pdev, tp->pci_cfg_state)
++#define tg3_pci_restore_state(tp)	pci_restore_state(tp->pdev, tp->pci_cfg_state)
++#else
++#define tg3_pci_save_state(tp)		pci_save_state(tp->pdev)
++#define tg3_pci_restore_state(tp)	pci_restore_state(tp->pdev)
++#endif
++
++#endif /* SIMPLE_DEV_PM_OPS */
++
++
++#ifdef BCM_HAS_NEW_PCI_DMA_MAPPING_ERROR
++#define pci_dma_mapping_error_(pdev, mapping)	pci_dma_mapping_error((pdev), (mapping))
++#define dma_mapping_error_(pdev, mapping)	dma_mapping_error((pdev), (mapping))
++#elif defined(BCM_HAS_PCI_DMA_MAPPING_ERROR)
++#define pci_dma_mapping_error_(pdev, mapping)	pci_dma_mapping_error((mapping))
++#define dma_mapping_error_(pdev, mapping)	dma_mapping_error((mapping))
++#else
++#define pci_dma_mapping_error_(pdev, mapping)	0
++#define dma_mapping_error_(pdev, mapping)	0
++#endif
++
++#ifndef BCM_HAS_HW_FEATURES
++#define hw_features features
++#endif
++
++#ifndef BCM_HAS_VLAN_FEATURES
++#define vlan_features features
++#endif
++
++#ifdef HAVE_POLL_CONTROLLER
++#define CONFIG_NET_POLL_CONTROLLER
++#endif
++
++static inline void tg3_5780_class_intx_workaround(struct tg3 *tp)
++{
++#ifndef BCM_HAS_INTX_MSI_WORKAROUND
++	if (tg3_flag(tp, 5780_CLASS) &&
++	    tg3_flag(tp, USING_MSI))
++		tg3_enable_intx(tp->pdev);
++#endif
++}
++
++#ifdef BCM_HAS_TXQ_TRANS_UPDATE
++#define tg3_update_trans_start(dev)
++#else
++#define tg3_update_trans_start(dev)	((dev)->trans_start = jiffies)
++#endif
++
++#ifndef BCM_HAS_NEW_VLAN_INTERFACE
++#define TG3_TO_INT(Y)			((int)((ptrdiff_t)(Y) & (SMP_CACHE_BYTES - 1)))
++#define TG3_COMPAT_VLAN_ALLOC_LEN	(SMP_CACHE_BYTES + VLAN_HLEN)
++#define TG3_COMPAT_VLAN_RESERVE(addr)	(SKB_DATA_ALIGN((addr) + VLAN_HLEN) - (addr))
++#else
++#define TG3_COMPAT_VLAN_ALLOC_LEN	0
++#define TG3_COMPAT_VLAN_RESERVE(addr)	0
++#endif
++
++#ifdef BCM_KERNEL_SUPPORTS_8021Q
++
++#ifndef BCM_HAS_NEW_VLAN_INTERFACE
++#undef TG3_RAW_IP_ALIGN
++#define TG3_RAW_IP_ALIGN (2 + VLAN_HLEN)
++#endif /* BCM_HAS_NEW_VLAN_INTERFACE */
++
++#ifndef BCM_HAS_NEW_VLAN_INTERFACE
++static void __tg3_set_rx_mode(struct net_device *);
++static inline void tg3_netif_start(struct tg3 *tp);
++static inline void tg3_netif_stop(struct tg3 *tp);
++static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
++static inline void tg3_full_unlock(struct tg3 *tp);
++
++static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
++{
++	struct tg3 *tp = netdev_priv(dev);
++
++	if (!netif_running(dev)) {
++		tp->vlgrp = grp;
++		return;
++	}
++
++	tg3_netif_stop(tp);
++
++	tg3_full_lock(tp, 0);
++
++	tp->vlgrp = grp;
++
++	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register.
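++	 * The chip strips VLAN tags in hardware only while a vlan group is
++	 * registered, so the flag has to be recomputed whenever tp->vlgrp
++	 * changes.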
*/ ++ __tg3_set_rx_mode(dev); ++ ++ tg3_netif_start(tp); ++ ++ tg3_full_unlock(tp); ++} ++ ++#ifndef BCM_HAS_NET_DEVICE_OPS ++#ifndef BCM_HAS_VLAN_GROUP_SET_DEVICE ++static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id, ++ struct net_device *dev) ++{ ++ if (vg) ++ vg->vlan_devices[vlan_id] = dev; ++} ++#endif ++ ++static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ if (netif_running(dev)) ++ tg3_netif_stop(tp); ++ ++ tg3_full_lock(tp, 0); ++ vlan_group_set_device(tp->vlgrp, vid, NULL); ++ tg3_full_unlock(tp); ++ ++ if (netif_running(dev)) ++ tg3_netif_start(tp); ++} ++#endif /* BCM_HAS_NET_DEVICE_OPS */ ++#endif /* BCM_USE_OLD_VLAN_INTERFACE */ ++#endif /* BCM_KERNEL_SUPPORTS_8021Q */ ++ ++ ++#ifndef BCM_HAS_NETDEV_UPDATE_FEATURES ++static u32 tg3_get_rx_csum(struct net_device *dev) ++{ ++ return (dev->features & NETIF_F_RXCSUM) != 0; ++} ++ ++static int tg3_set_rx_csum(struct net_device *dev, u32 data) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ /* BROKEN_CHECKSUMS */ ++ if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) { ++ if (data != 0) ++ return -EINVAL; ++ return 0; ++ } ++ ++ spin_lock_bh(&tp->lock); ++ if (data) ++ dev->features |= NETIF_F_RXCSUM; ++ else ++ dev->features &= ~NETIF_F_RXCSUM; ++ spin_unlock_bh(&tp->lock); ++ ++ return 0; ++} ++ ++#ifdef BCM_HAS_SET_TX_CSUM ++static int tg3_set_tx_csum(struct net_device *dev, u32 data) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ /* BROKEN_CHECKSUMS */ ++ if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) { ++ if (data != 0) ++ return -EINVAL; ++ return 0; ++ } ++ ++ if (tg3_flag(tp, 5755_PLUS)) ++#if defined(BCM_HAS_ETHTOOL_OP_SET_TX_IPV6_CSUM) ++ ethtool_op_set_tx_ipv6_csum(dev, data); ++#elif defined(BCM_HAS_ETHTOOL_OP_SET_TX_HW_CSUM) ++ ethtool_op_set_tx_hw_csum(dev, data); ++#else ++ tg3_set_tx_hw_csum(dev, data); ++#endif ++ else ++ ethtool_op_set_tx_csum(dev, data); ++ ++ return 0; ++} ++#endif ++ ++#if TG3_TSO_SUPPORT != 0 ++static int tg3_set_tso(struct net_device *dev, u32 value) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ if (!tg3_flag(tp, TSO_CAPABLE)) { ++ if (value) ++ return -EINVAL; ++ return 0; ++ } ++ if ((dev->features & NETIF_F_IPV6_CSUM) && ++ (tg3_flag(tp, HW_TSO_2) || ++ tg3_flag(tp, HW_TSO_3))) { ++ if (value) { ++ dev->features |= NETIF_F_TSO6; ++ if (tg3_flag(tp, HW_TSO_3) || ++ tg3_asic_rev(tp) == ASIC_REV_5761 || ++ (tg3_asic_rev(tp) == ASIC_REV_5784 && ++ tg3_chip_rev(tp) != CHIPREV_5784_AX) || ++ tg3_asic_rev(tp) == ASIC_REV_5785 || ++ tg3_asic_rev(tp) == ASIC_REV_57780) ++ dev->features |= NETIF_F_TSO_ECN; ++ } else ++ dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); ++ } ++ return ethtool_op_set_tso(dev, value); ++} ++#endif ++ ++static void netdev_update_features(struct net_device *dev) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ ++ if (dev->mtu > ETH_DATA_LEN) { ++ if (tg3_flag(tp, 5780_CLASS)) { ++#if TG3_TSO_SUPPORT != 0 ++ ethtool_op_set_tso(dev, 0); ++#endif ++ } ++ } ++} ++#endif /* BCM_HAS_NETDEV_UPDATE_FEATURES */ ++ ++#if !defined(BCM_HAS_SET_PHYS_ID) || defined(GET_ETHTOOL_OP_EXT) ++ ++#if !defined(BCM_HAS_SET_PHYS_ID) ++enum ethtool_phys_id_state { ++ ETHTOOL_ID_INACTIVE, ++ ETHTOOL_ID_ACTIVE, ++ ETHTOOL_ID_ON, ++ ETHTOOL_ID_OFF ++}; ++#endif ++ ++static int tg3_set_phys_id(struct net_device *dev, ++ enum ethtool_phys_id_state state); ++static int tg3_phys_id(struct net_device *dev, u32 data) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ int i; ++ ++ if (!netif_running(tp->dev)) ++ return 
-EAGAIN; ++ ++ if (data == 0) ++ data = UINT_MAX / 2; ++ ++ for (i = 0; i < (data * 2); i++) { ++ if ((i % 2) == 0) ++ tg3_set_phys_id(dev, ETHTOOL_ID_ON); ++ else ++ tg3_set_phys_id(dev, ETHTOOL_ID_OFF); ++ ++ if (msleep_interruptible(500)) ++ break; ++ } ++ tg3_set_phys_id(dev, ETHTOOL_ID_INACTIVE); ++ return 0; ++} ++#endif /* BCM_HAS_SET_PHYS_ID */ ++ ++#ifndef BCM_HAS_GET_STATS64 ++static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, ++ struct rtnl_link_stats64 *stats); ++static struct rtnl_link_stats64 *tg3_get_stats(struct net_device *dev) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ return tg3_get_stats64(dev, &tp->net_stats); ++} ++#endif /* BCM_HAS_GET_STATS64 */ ++ ++#ifdef BCM_HAS_GET_RXFH_INDIR ++#ifndef BCM_HAS_GET_RXFH_INDIR_SIZE ++static int tg3_get_rxfh_indir(struct net_device *dev, ++ struct ethtool_rxfh_indir *indir) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ int i; ++ ++ if (!tg3_flag(tp, SUPPORT_MSIX)) ++ return -EINVAL; ++ ++ if (!indir->size) { ++ indir->size = TG3_RSS_INDIR_TBL_SIZE; ++ return 0; ++ } ++ ++ if (indir->size != TG3_RSS_INDIR_TBL_SIZE) ++ return -EINVAL; ++ ++ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) ++ indir->ring_index[i] = tp->rss_ind_tbl[i]; ++ ++ return 0; ++} ++ ++static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt); ++static void tg3_rss_write_indir_tbl(struct tg3 *tp); ++static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); ++static inline void tg3_full_unlock(struct tg3 *tp); ++ ++static int tg3_set_rxfh_indir(struct net_device *dev, ++ const struct ethtool_rxfh_indir *indir) ++{ ++ struct tg3 *tp = netdev_priv(dev); ++ size_t i; ++ ++ if (!tg3_flag(tp, SUPPORT_MSIX)) ++ return -EINVAL; ++ ++ if (!indir->size) { ++ tg3_flag_clear(tp, USER_INDIR_TBL); ++ tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt); ++ } else { ++ int limit; ++ ++ /* Validate size and indices */ ++ if (indir->size != TG3_RSS_INDIR_TBL_SIZE) ++ return -EINVAL; ++ ++ if (netif_running(dev)) ++ limit = tp->irq_cnt; ++ else { ++ limit = num_online_cpus(); ++ if (limit > TG3_IRQ_MAX_VECS_RSS) ++ limit = TG3_IRQ_MAX_VECS_RSS; ++ } ++ ++ /* The first interrupt vector only ++ * handles link interrupts. ++ */ ++ limit -= 1; ++ ++ /* Check the indices in the table. ++ * Leave the existing table unmodified ++ * if an error is detected. ++ */ ++ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) ++ if (indir->ring_index[i] >= limit) ++ return -EINVAL; ++ ++ tg3_flag_set(tp, USER_INDIR_TBL); ++ ++ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) ++ tp->rss_ind_tbl[i] = indir->ring_index[i]; ++ } ++ ++ if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) ++ return 0; ++ ++ /* It is legal to write the indirection ++ * table while the device is running. ++ */ ++ tg3_full_lock(tp, 0); ++ tg3_rss_write_indir_tbl(tp); ++ tg3_full_unlock(tp); ++ ++ return 0; ++} ++#endif /* !BCM_HAS_GET_RXFH_INDIR_SIZE */ ++#endif /* BCM_HAS_GET_RXFH_INDIR */ ++ ++#ifdef __VMKLNX__ ++ ++/** ++ * skb_copy_expand - copy and expand sk_buff ++ * @skb: buffer to copy ++ * @newheadroom: new free bytes at head ++ * @newtailroom: new free bytes at tail ++ * @gfp_mask: allocation priority ++ * ++ * Make a copy of both an &sk_buff and its data and while doing so ++ * allocate additional space. ++ * ++ * This is used when the caller wishes to modify the data and needs a ++ * private copy of the data to alter as well as more space for new fields. ++ * Returns %NULL on failure or the pointer to the buffer ++ * on success. The returned buffer has a reference count of 1. 
++ * ++ * You must pass %GFP_ATOMIC as the allocation priority if this function ++ * is called from an interrupt. ++ */ ++struct sk_buff *skb_copy_expand(const struct sk_buff *skb, ++ int newheadroom, int newtailroom, ++ gfp_t gfp_mask) ++{ ++ int rc; ++ struct sk_buff *new_skb = skb_copy((struct sk_buff *) skb, gfp_mask); ++ ++ if(new_skb == NULL) ++ return NULL; ++ ++ rc = pskb_expand_head(new_skb, newheadroom, newtailroom, gfp_mask); ++ ++ if(rc != 0) ++ return NULL; ++ ++ return new_skb; ++} ++ ++void *memmove(void *dest, const void *src, size_t count) ++{ ++ if (dest < src) { ++ return memcpy(dest, src, count); ++ } else { ++ char *p = dest + count; ++ const char *s = src + count; ++ while (count--) ++ *--p = *--s; ++ } ++ return dest; ++} ++#endif +diff --git a/drivers/net/ethernet/broadcom/tg3/tg3_firmware.h b/drivers/net/ethernet/broadcom/tg3/tg3_firmware.h +new file mode 100644 +index 0000000..a5a4928 +--- /dev/null ++++ b/drivers/net/ethernet/broadcom/tg3/tg3_firmware.h +@@ -0,0 +1,1012 @@ ++/* Copyright (C) 2009-2015 Broadcom Corporation. */ ++ ++#ifdef NETIF_F_TSO ++#define TG3_TSO_SUPPORT 1 ++#else ++#define TG3_TSO_SUPPORT 0 ++#endif ++ ++#ifndef BCM_HAS_REQUEST_FIRMWARE ++ ++struct tg3_firmware { ++ size_t size; ++ const u8 *data; ++}; ++ ++struct tg3_firmware_hdr { ++ u32 version; /* unused for fragments */ ++ u32 base_addr; ++ u32 len; ++}; ++#define TG3_FW_HDR_LEN (sizeof(struct tg3_firmware_hdr)) ++ ++#ifndef MODULE_FIRMWARE ++#define MODULE_FIRMWARE(x) ++#endif ++ ++#define TG3_FW_RELEASE_MAJOR 0x0 ++#define TG3_FW_RELASE_MINOR 0x0 ++#define TG3_FW_RELEASE_FIX 0x0 ++#define TG3_FW_START_ADDR 0x08000000 ++#define TG3_FW_TEXT_ADDR 0x08000000 ++#define TG3_FW_TEXT_LEN 0x9c0 ++#define TG3_FW_RODATA_ADDR 0x080009c0 ++#define TG3_FW_RODATA_LEN 0x60 ++#define TG3_FW_DATA_ADDR 0x08000a40 ++#define TG3_FW_DATA_LEN 0x20 ++#define TG3_FW_SBSS_ADDR 0x08000a60 ++#define TG3_FW_SBSS_LEN 0xc ++#define TG3_FW_BSS_ADDR 0x08000a70 ++#define TG3_FW_BSS_LEN 0x10 ++ ++#define TG3_5701_RLS_FW_LEN (TG3_FW_TEXT_LEN + TG3_FW_RODATA_LEN) ++ ++static const u32 tg3FwText[] = { ++0x00000000, (u32)TG3_FW_TEXT_ADDR, (u32)TG3_5701_RLS_FW_LEN, ++0x00000000, 0x10000003, 0x00000000, 0x0000000d, ++0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, ++0x3c100800, 0x26100000, 0x0e000018, 0x00000000, ++0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, ++0x3c100800, 0x26100034, 0x0e00021c, 0x00000000, ++0x0000000d, 0x00000000, 0x00000000, 0x00000000, ++0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, ++0x0e00004c, 0x241b2105, 0x97850000, 0x97870002, ++0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0, ++0xafa00014, 0x00021400, 0x00621825, 0x00052c00, ++0xafa30010, 0x8f860010, 0x00e52825, 0x0e000060, ++0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01, ++0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, ++0xaf830498, 0xaf82049c, 0x24020001, 0xaf825ce0, ++0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000, ++0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, ++0xaf825404, 0x8f835400, 0x34630400, 0xaf835400, ++0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c, ++0x03e00008, 0xaf805400, 0x00000000, 0x00000000, ++0x3c020800, 0x34423000, 0x3c030800, 0x34633000, ++0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64, ++0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, ++0xac200a60, 0xac600000, 0x24630004, 0x0083102b, ++0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, ++0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, ++0x3c040800, 0x8c840a68, 0x8fab0014, 0x24430001, ++0x0044102b, 0x3c010800, 0xac230a60, 0x14400003, ++0x00004021, 0x3c010800, 
0xac200a60, 0x3c020800, ++0x8c420a60, 0x3c030800, 0x8c630a64, 0x91240000, ++0x00021140, 0x00431021, 0x00481021, 0x25080001, ++0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, ++0x3c020800, 0x8c420a60, 0x3c030800, 0x8c630a64, ++0x8f84680c, 0x00021140, 0x00431021, 0xac440008, ++0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, ++0x03e00008, 0xac4b001c, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, ++0x0a0001e3, 0x3c0a0002, 0x0a0001e3, 0x00000000, ++0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, ++0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, ++0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, ++0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, ++0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, ++0x0a0001e3, 0x3c0a0009, 0x0a0001e3, 0x00000000, ++0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b, ++0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, ++0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, ++0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000, ++0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, ++0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, ++0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, ++0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, ++0x0a0001e3, 0x3c0a0014, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, ++0xafb10014, 0xafb00010, 0x3c010800, 0x00220821, ++0xac200a70, 0x3c010800, 0x00220821, 0xac200a74, ++0x3c010800, 0x00220821, 0xac200a78, 0x24630001, ++0x1860fff5, 0x2442000c, 0x24110001, 0x8f906810, ++0x32020004, 0x14400005, 0x24040001, 0x3c020800, ++0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, ++0x00000000, 0x32020001, 0x10400003, 0x00000000, ++0x0e000169, 0x00000000, 0x0a000153, 0xaf915028, ++0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, ++0x27bd0020, 0x3c050800, 0x8ca50a70, 0x3c060800, ++0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0, ++0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, ++0x0e000060, 0xafa00014, 0x0e00017b, 0x00002021, ++0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001, ++0x8f836810, 0x00821004, 0x00021027, 0x00621824, ++0x03e00008, 0xaf836810, 0x27bdffd8, 0xafbf0024, ++0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018, ++0x8f825cec, 0x3c100800, 
0x26100a78, 0xafa2001c, ++0x34028000, 0xaf825cec, 0x8e020000, 0x18400016, ++0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c, ++0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, ++0x0e000201, 0xac220a74, 0x10400005, 0x00000000, ++0x8e020000, 0x24420001, 0x0a0001df, 0xae020000, ++0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, ++0x0a0001c5, 0xafa2001c, 0x0e000201, 0x00000000, ++0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c, ++0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, ++0xac230a74, 0x0a0001df, 0xae020000, 0x3c100800, ++0x26100a78, 0x8e020000, 0x18400028, 0x00000000, ++0x0e000201, 0x00000000, 0x14400024, 0x00000000, ++0x8e020000, 0x3c030800, 0x8c630a70, 0x2442ffff, ++0xafa3001c, 0x18400006, 0xae020000, 0x00031402, ++0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, ++0x97a2001e, 0x2442ff00, 0x2c420300, 0x1440000b, ++0x24024000, 0x3c040800, 0x248409dc, 0xafa00010, ++0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, ++0x00003821, 0x0a0001df, 0x00000000, 0xaf825cf8, ++0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001, ++0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, ++0x8fb00020, 0x03e00008, 0x27bd0028, 0x27bdffe0, ++0x3c040800, 0x248409e8, 0x00002821, 0x00003021, ++0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, ++0xafa00014, 0x8fbf0018, 0x03e00008, 0x27bd0020, ++0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b, ++0x00031823, 0x00431024, 0x00441021, 0x00a2282b, ++0x10a00006, 0x00000000, 0x00401821, 0x8f82680c, ++0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008, ++0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, ++0x8c630a40, 0x0064102b, 0x54400002, 0x00831023, ++0x00641023, 0x2c420008, 0x03e00008, 0x38420001, ++0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, ++0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, ++0x0e000060, 0xafa00014, 0x0a000216, 0x00000000, ++0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, ++0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0x0e00004c, ++0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821, ++0x00003021, 0x00003821, 0xafa00010, 0x0e000060, ++0xafa00014, 0x2402ffff, 0xaf825404, 0x3c0200aa, ++0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008, ++0x27bd0020, 0x00000000, 0x00000000, 0x00000000, ++0x27bdffe8, 0xafb00010, 0x24100001, 0xafbf0014, ++0x3c01c003, 0xac200000, 0x8f826810, 0x30422000, ++0x10400003, 0x00000000, 0x0e000246, 0x00000000, ++0x0a00023a, 0xaf905428, 0x8fbf0014, 0x8fb00010, ++0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c, ++0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, ++0x00821024, 0x1043001e, 0x3c0500ff, 0x34a5fff8, ++0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010, ++0x3c010800, 0xac230a50, 0x30420008, 0x10400005, ++0x00871025, 0x8cc20000, 0x24420001, 0xacc20000, ++0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001, ++0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, ++0xafa20000, 0x8fa20000, 0x8f845d0c, 0x3c030800, ++0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824, ++0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, ++0x35373031, 0x726c7341, 0x00000000, 0x00000000, ++0x53774576, 0x656e7430, 0x00000000, 0x726c7045, ++0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x66617461, 0x6c457272, 0x00000000, 0x00000000, ++0x4d61696e, 0x43707542, 0x00000000, 0x00000000, ++}; ++ ++static const struct tg3_firmware tg3_5701_fw = { ++ .size = TG3_5701_RLS_FW_LEN, ++ .data = (u8 *)&tg3FwText[0], ++}; ++ ++#define TG3_57766_FW_BASE_ADDR 0x00030000 ++#define TG3_57766_FW_HANDSHAKE 0x0003fccc ++#define TG3_57766_FW_TEXT_ADDR 0x00030000 ++#define TG3_57766_FW_TEXT_LEN (0x58 + TG3_FW_HDR_LEN) ++#define TG3_57766_FW_PRIV1_ADDR 0x0003fd00 ++#define 
TG3_57766_FW_PRIV1_SIZE (0x4 + TG3_FW_HDR_LEN) ++#define TG3_57766_FW_PRIV2_ADDR 0x0003fccc ++#define TG3_57766_FW_PRIV2_SIZE (0x4 + TG3_FW_HDR_LEN) ++#define TG3_57766_FW_RESERVED 0xdecafbad ++ ++static const u32 tg3_57766_fwdata[] = { ++0x00000000, TG3_57766_FW_BASE_ADDR, 0xffffffff, ++TG3_57766_FW_RESERVED, TG3_57766_FW_TEXT_ADDR, TG3_57766_FW_TEXT_LEN, ++0x27800001, 0xf7f0403e, 0xcd283674, 0x11001100, ++0xf7ff1064, 0x376e0001, 0x27600000, 0xf7f07fea, ++0xf7f00004, 0xf7f00018, 0xcc10362c, 0x00180018, ++0x17800000, 0xf7f00008, 0xc33836b0, 0xf7f00004, ++0xc43836b0, 0xc62036bc, 0x00000009, 0xcb3836b0, ++0x17800001, 0x1760000a, ++TG3_57766_FW_RESERVED, TG3_57766_FW_PRIV1_ADDR, TG3_57766_FW_PRIV1_SIZE, ++0xd044d816, ++TG3_57766_FW_RESERVED, TG3_57766_FW_PRIV2_ADDR, TG3_57766_FW_PRIV2_SIZE, ++0x02300202, ++}; ++ ++static const struct tg3_firmware tg3_57766_fw = { ++ .size = sizeof(tg3_57766_fwdata), ++ .data = (u8 *)&tg3_57766_fwdata[0], ++}; ++ ++#if TG3_TSO_SUPPORT != 0 ++ ++#define TG3_TSO_FW_RELEASE_MAJOR 0x1 ++#define TG3_TSO_FW_RELASE_MINOR 0x6 ++#define TG3_TSO_FW_RELEASE_FIX 0x0 ++#define TG3_TSO_FW_START_ADDR 0x08000000 ++#define TG3_TSO_FW_TEXT_ADDR 0x08000000 ++#define TG3_TSO_FW_TEXT_LEN 0x1aa0 ++#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0 ++#define TG3_TSO_FW_RODATA_LEN 0x60 ++#define TG3_TSO_FW_DATA_ADDR 0x08001b20 ++#define TG3_TSO_FW_DATA_LEN 0x30 ++#define TG3_TSO_FW_SBSS_ADDR 0x08001b50 ++#define TG3_TSO_FW_SBSS_LEN 0x2c ++#define TG3_TSO_FW_BSS_ADDR 0x08001b80 ++#define TG3_TSO_FW_BSS_LEN 0x894 ++ ++#define TG3_LGCY_TSO_FW_LEN \ ++ (TG3_TSO_FW_TEXT_LEN + \ ++ TG3_TSO_FW_RODATA_LEN + \ ++ 0x20 + \ ++ TG3_TSO_FW_DATA_LEN) ++ ++static const u32 tg3TsoFwText[] = { ++0x00010600, (u32)TG3_TSO_FW_TEXT_ADDR, (u32)TG3_LGCY_TSO_FW_LEN, ++0x0e000003, 0x00000000, 0x08001b24, 0x00000000, ++0x10000003, 0x00000000, 0x0000000d, 0x0000000d, ++0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800, ++0x26100000, 0x0e000010, 0x00000000, 0x0000000d, ++0x27bdffe0, 0x3c04fefe, 0xafbf0018, 0x0e0005d8, ++0x34840002, 0x0e000668, 0x00000000, 0x3c030800, ++0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, ++0x14620003, 0x24050001, 0x3c040800, 0x24841aa0, ++0x24060006, 0x00003821, 0xafa00010, 0x0e00067c, ++0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, ++0x8f625c90, 0x34420001, 0xaf625c90, 0x2402ffff, ++0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008, ++0x27bd0020, 0x00000000, 0x00000000, 0x00000000, ++0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014, ++0x0e00005b, 0xafb00010, 0x24120002, 0x24110001, ++0x8f706820, 0x32020100, 0x10400003, 0x00000000, ++0x0e0000bb, 0x00000000, 0x8f706820, 0x32022000, ++0x10400004, 0x32020001, 0x0e0001f0, 0x24040001, ++0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, ++0x00000000, 0x3c020800, 0x90421b98, 0x14520003, ++0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c, ++0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, ++0x8fb00010, 0x03e00008, 0x27bd0020, 0x27bdffe0, ++0x3c040800, 0x24841ac0, 0x00002821, 0x00003021, ++0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, ++0xafa00014, 0x3c040800, 0x248423d8, 0xa4800000, ++0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c, ++0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, ++0x3c010800, 0xac201bac, 0x3c010800, 0xac201bb8, ++0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800, ++0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, ++0x8f624410, 0xac80f7a8, 0x3c010800, 0xac201b84, ++0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8, ++0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, ++0x3c010800, 0xac221b90, 0x8f620068, 0x24030007, ++0x00021702, 0x10430005, 
0x00000000, 0x8f620068, ++0x00021702, 0x14400004, 0x24020001, 0x3c010800, ++0x0a000097, 0xac20240c, 0xac820034, 0x3c040800, ++0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021, ++0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, ++0x8fbf0018, 0x03e00008, 0x27bd0020, 0x27bdffe0, ++0x3c040800, 0x24841ad8, 0x00002821, 0x00003021, ++0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, ++0xafa00014, 0x0e00005b, 0x00000000, 0x0e0000b4, ++0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, ++0x24020001, 0x8f636820, 0x00821004, 0x00021027, ++0x00621824, 0x03e00008, 0xaf636820, 0x27bdffd0, ++0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020, ++0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, ++0x8f675c5c, 0x3c030800, 0x24631bbc, 0x8c620000, ++0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98, ++0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, ++0xac670000, 0x00111902, 0x306300ff, 0x30e20003, ++0x000211c0, 0x00622825, 0x00a04021, 0x00071602, ++0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, ++0x00804821, 0x24020001, 0x3c010800, 0xa0221b98, ++0x00051100, 0x00821025, 0x3c010800, 0xac201b9c, ++0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, ++0x3c010800, 0xac201bac, 0x3c010800, 0xac201bb8, ++0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4, ++0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, ++0x3c010800, 0xa4222410, 0x30428000, 0x3c010800, ++0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800, ++0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, ++0x3c010800, 0xac2023f4, 0x9622000a, 0x3c030800, ++0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800, ++0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, ++0x00621821, 0x3c010800, 0xa42223d0, 0x3c010800, ++0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800, ++0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, ++0x00021100, 0x3c010800, 0x00220821, 0xac311bc8, ++0x8c820000, 0x00021100, 0x3c010800, 0x00220821, ++0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, ++0x00021100, 0x3c010800, 0x00220821, 0xac261bd0, ++0x8c820000, 0x00021100, 0x3c010800, 0x00220821, ++0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, ++0x00432821, 0x3c010800, 0xac251bac, 0x9622000a, ++0x30420004, 0x14400018, 0x00061100, 0x8f630c14, ++0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, ++0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f, ++0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, ++0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, ++0x8f625c50, 0x30420002, 0x10400014, 0x00000000, ++0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80, ++0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, ++0xa42223da, 0x24020001, 0x3c010800, 0xac221bb8, ++0x24630001, 0x0085202a, 0x3c010800, 0x10800003, ++0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, ++0x24c61b9c, 0x8cc20000, 0x24420001, 0xacc20000, ++0x28420080, 0x14400005, 0x00000000, 0x0e000656, ++0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, ++0x8c421bb8, 0x10400078, 0x24020001, 0x3c050800, ++0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800, ++0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, ++0x0083102a, 0x1440006c, 0x00000000, 0x14830003, ++0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c, ++0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, ++0x8ec20000, 0x00028100, 0x3c110800, 0x02308821, ++0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054, ++0x00000000, 0x9628000a, 0x31020040, 0x10400005, ++0x2407180c, 0x8e22000c, 0x2407188c, 0x00021400, ++0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0, ++0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, ++0x00021400, 0x00621825, 0xaca30014, 0x8ec30004, ++0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff, ++0x00431021, 0x0282102a, 0x14400002, 0x02b23023, ++0x00803021, 0x8e620000, 
0x30c4ffff, 0x00441021, ++0xae620000, 0x8e220000, 0xaca20000, 0x8e220004, ++0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, ++0x8e62fff4, 0x00441021, 0xae62fff4, 0x96230008, ++0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0, ++0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, ++0x3242ffff, 0x14540008, 0x24020305, 0x31020080, ++0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c, ++0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, ++0x8c4223f0, 0x10400003, 0x3c024b65, 0x0a0001d3, ++0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c, ++0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, ++0x3242ffff, 0x0054102b, 0x1440ffa9, 0x00000000, ++0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98, ++0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, ++0x0e0004c0, 0x00000000, 0x8fbf002c, 0x8fb60028, ++0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018, ++0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, ++0x27bdffd0, 0xafbf0028, 0xafb30024, 0xafb20020, ++0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff, ++0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, ++0x9623000e, 0x8ce20000, 0x00431021, 0xace20000, ++0x8e220010, 0x30420020, 0x14400011, 0x00809821, ++0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, ++0xaf635c9c, 0x8f625c90, 0x30420002, 0x1040011e, ++0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002, ++0x10400119, 0x00000000, 0x0a00020d, 0x00000000, ++0x8e240008, 0x8e230014, 0x00041402, 0x000231c0, ++0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f, ++0x00031942, 0x30637800, 0x00021100, 0x24424000, ++0x00624821, 0x9522000a, 0x3084ffff, 0x30420008, ++0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400, ++0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, ++0xa42223d0, 0x8cc40010, 0x00041402, 0x3c010800, ++0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e, ++0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, ++0x94c2001a, 0x3c010800, 0xac262400, 0x3c010800, ++0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000, ++0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, ++0x104000e5, 0x00000000, 0xaf635c9c, 0x8f625c90, ++0x30420002, 0x104000e0, 0x00000000, 0x0a000246, ++0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, ++0x00434023, 0x3103ffff, 0x2c620008, 0x1040001c, ++0x00000000, 0x94c20014, 0x24420028, 0x00a22821, ++0x00031042, 0x1840000b, 0x00002021, 0x24e60848, ++0x00403821, 0x94a30000, 0x8cc20000, 0x24840001, ++0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9, ++0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, ++0x3c040800, 0x248423fc, 0xa0a00001, 0x94a30000, ++0x8c820000, 0x00431021, 0x0a000285, 0xac820000, ++0x8f626800, 0x3c030010, 0x00431024, 0x10400009, ++0x00000000, 0x94c2001a, 0x3c030800, 0x8c6323fc, ++0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286, ++0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, ++0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, ++0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c, ++0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, ++0x9522000a, 0x30420010, 0x1040009b, 0x00000000, ++0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400, ++0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, ++0x3c030010, 0x00431024, 0x1440000a, 0x00000000, ++0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800, ++0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, ++0xac2323fc, 0x3c040800, 0x8c8423fc, 0x00041c02, ++0x3082ffff, 0x00622021, 0x00041402, 0x00822021, ++0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, ++0x3c0200ff, 0x3442fff8, 0x00628824, 0x96220008, ++0x24050001, 0x24034000, 0x000231c0, 0x00801021, ++0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, ++0xac251b60, 0xaf635cb8, 0x8f625cb0, 0x30420002, ++0x10400003, 0x00000000, 0x3c010800, 0xac201b60, ++0x8e220008, 0xaf625cb8, 
0x8f625cb0, 0x30420002, ++0x10400003, 0x00000000, 0x3c010800, 0xac201b60, ++0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000, ++0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, ++0x00000000, 0x3c030800, 0x90631b98, 0x24020002, ++0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021, ++0x8e22001c, 0x34637654, 0x10430002, 0x24100002, ++0x24100001, 0x00c02021, 0x0e000350, 0x02003021, ++0x24020003, 0x3c010800, 0xa0221b98, 0x24020002, ++0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, ++0x10620006, 0x00000000, 0x3c020800, 0x944223d8, ++0x00021400, 0x0a00031f, 0xae220014, 0x3c040800, ++0x248423da, 0x94820000, 0x00021400, 0xae220014, ++0x3c020800, 0x8c421bbc, 0x3c03c000, 0x3c010800, ++0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50, ++0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, ++0x8c820000, 0x00431025, 0xaf625c5c, 0x8f625c50, ++0x30420002, 0x1440fffa, 0x00000000, 0x3c020800, ++0x24421b84, 0x8c430000, 0x24630001, 0xac430000, ++0x8f630c14, 0x3063000f, 0x2c620002, 0x1440000c, ++0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40, ++0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, ++0x2c620002, 0x1040fff7, 0x00000000, 0x3c024000, ++0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, ++0x1440fffc, 0x00000000, 0x12600003, 0x00000000, ++0x0e0004c0, 0x00000000, 0x8fbf0028, 0x8fb30024, ++0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008, ++0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, ++0x8c820000, 0x00031c02, 0x0043102b, 0x14400007, ++0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02, ++0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, ++0x8f624444, 0x00431024, 0x1440fffd, 0x00000000, ++0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000, ++0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, ++0x1440fffc, 0x00000000, 0x03e00008, 0x00000000, ++0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008, ++0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, ++0x3c010800, 0xa42223d2, 0x2402002a, 0x3c010800, ++0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402, ++0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, ++0x3c040800, 0x948423d4, 0x3c030800, 0x946323d2, ++0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023, ++0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, ++0x3082ffff, 0x14c0001a, 0x01226021, 0x9582000c, ++0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004, ++0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, ++0xac2023e8, 0x00021400, 0x00431025, 0x3c010800, ++0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4, ++0x95230002, 0x01e51023, 0x0043102a, 0x10400010, ++0x24020001, 0x3c010800, 0x0a000398, 0xac2223f8, ++0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4, ++0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, ++0xa5820004, 0x3c020800, 0x8c421bc0, 0xa5820006, ++0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4, ++0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, ++0x94421bc4, 0x004a1821, 0x3063ffff, 0x0062182b, ++0x24020002, 0x10c2000d, 0x01435023, 0x3c020800, ++0x944223d6, 0x30420009, 0x10400008, 0x00000000, ++0x9582000c, 0x3042fff6, 0xa582000c, 0x3c020800, ++0x944223d6, 0x30420009, 0x01a26823, 0x3c020800, ++0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, ++0x944223d2, 0x00004021, 0xa520000a, 0x01e21023, ++0xa5220002, 0x3082ffff, 0x00021042, 0x18400008, ++0x00003021, 0x00401821, 0x94e20000, 0x25080001, ++0x00c23021, 0x0103102a, 0x1440fffb, 0x24e70002, ++0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402, ++0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, ++0x00003021, 0x2527000c, 0x00004021, 0x94e20000, ++0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb, ++0x24e70002, 0x95220002, 0x00004021, 0x91230009, ++0x00442023, 0x01803821, 0x3082ffff, 0xa4e00010, ++0x00621821, 0x00021042, 
0x18400010, 0x00c33021, ++0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, ++0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, ++0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008, ++0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, ++0x10400005, 0x00061c02, 0xa0e00001, 0x94e20000, ++0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021, ++0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, ++0x24020002, 0x14c20081, 0x00000000, 0x3c020800, ++0x8c42240c, 0x14400007, 0x00000000, 0x3c020800, ++0x944223d2, 0x95230002, 0x01e21023, 0x10620077, ++0x00000000, 0x3c020800, 0x944223d2, 0x01e21023, ++0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a, ++0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, ++0x00e04021, 0x00072c02, 0x00aa2021, 0x00431023, ++0x00823823, 0x00072402, 0x30e2ffff, 0x00823821, ++0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, ++0x948423d4, 0x00453023, 0x00e02821, 0x00641823, ++0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff, ++0x0a00047d, 0x00623021, 0x01203821, 0x00004021, ++0x3082ffff, 0x00021042, 0x18400008, 0x00003021, ++0x00401821, 0x94e20000, 0x25080001, 0x00c23021, ++0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, ++0x30c2ffff, 0x00623021, 0x00061402, 0x00c23021, ++0x00c02821, 0x00061027, 0xa522000a, 0x00003021, ++0x2527000c, 0x00004021, 0x94e20000, 0x25080001, ++0x00c23021, 0x2d020004, 0x1440fffb, 0x24e70002, ++0x95220002, 0x00004021, 0x91230009, 0x00442023, ++0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, ++0x948423d4, 0x00621821, 0x00c33021, 0x00061c02, ++0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800, ++0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, ++0x00431021, 0x00021043, 0x18400010, 0x00003021, ++0x00402021, 0x94e20000, 0x24e70002, 0x00c23021, ++0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, ++0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008, ++0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800, ++0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, ++0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402, ++0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, ++0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, ++0x00e04021, 0x11400007, 0x00072c02, 0x00aa3021, ++0x00061402, 0x30c3ffff, 0x00433021, 0x00061402, ++0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, ++0x946323d4, 0x3102ffff, 0x01e21021, 0x00433023, ++0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021, ++0x00061402, 0x00c23021, 0x00c04021, 0x00061027, ++0xa5820010, 0x3102ffff, 0x00051c00, 0x00431025, ++0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005, ++0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, ++0xa5c20034, 0x3c030800, 0x246323e8, 0x8c620000, ++0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4, ++0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, ++0x00431821, 0x0062102b, 0x3c010800, 0xac2423e4, ++0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4, ++0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, ++0x27bdffb8, 0x3c050800, 0x24a51b96, 0xafbf0044, ++0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034, ++0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, ++0xafb00020, 0x94a90000, 0x3c020800, 0x944223d0, ++0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac, ++0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, ++0xa7a20016, 0x24be0022, 0x97b6001e, 0x24b3001a, ++0x24b70016, 0x8fc20000, 0x14400008, 0x00000000, ++0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, ++0x0082202a, 0x148000b0, 0x00000000, 0x97d50818, ++0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021, ++0x00008821, 0x0e000625, 0x00000000, 0x00403021, ++0x14c00007, 0x00000000, 0x3c020800, 0x8c4223dc, ++0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc, ++0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, ++0x31020040, 0x10400005, 
0x2407180c, 0x8e02000c, ++0x2407188c, 0x00021400, 0xacc20018, 0x31020080, ++0x54400001, 0x34e70010, 0x3c020800, 0x00511021, ++0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, ++0x00021500, 0x00031c00, 0x00431025, 0xacc20014, ++0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, ++0x14400002, 0x02b22823, 0x00802821, 0x8e020000, ++0x02459021, 0xacc20000, 0x8e020004, 0x00c02021, ++0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, ++0xa485000e, 0xac820010, 0x24020305, 0x0e0005a2, ++0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5, ++0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, ++0x8e63fffc, 0x0043102a, 0x10400067, 0x00000000, ++0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021, ++0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, ++0x00000000, 0x8e62082c, 0x24420001, 0x0a000596, ++0xae62082c, 0x9608000a, 0x31020040, 0x10400005, ++0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, ++0xacc20018, 0x3c020800, 0x00511021, 0x8c421bd0, ++0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500, ++0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, ++0x96020008, 0x00432023, 0x3242ffff, 0x3083ffff, ++0x00431021, 0x02c2102a, 0x10400003, 0x00802821, ++0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, ++0x00441021, 0xae620000, 0xa4c5000e, 0x8e020000, ++0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021, ++0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, ++0x0062102a, 0x14400006, 0x02459021, 0x8e62fff0, ++0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0, ++0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, ++0x31020004, 0x10400006, 0x24020305, 0x31020080, ++0x54400001, 0x34e70010, 0x34e70020, 0x24020905, ++0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, ++0x3c02b49a, 0x8ee20860, 0x54400001, 0x34e70400, ++0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab, ++0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, ++0x00c02021, 0x3242ffff, 0x0056102b, 0x1440ff9b, ++0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a, ++0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, ++0x8fb7003c, 0x8fb60038, 0x8fb50034, 0x8fb40030, ++0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020, ++0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, ++0xafb00010, 0x8f624450, 0x8f634410, 0x0a0005b1, ++0x00808021, 0x8f626820, 0x30422000, 0x10400003, ++0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, ++0x8f634410, 0x3042ffff, 0x0043102b, 0x1440fff5, ++0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002, ++0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, ++0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, ++0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000, ++0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, ++0x00000000, 0x8f626820, 0x30422000, 0x1040fff8, ++0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4, ++0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, ++0x27bd0018, 0x00000000, 0x00000000, 0x00000000, ++0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, ++0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, ++0x8f634000, 0x24020b50, 0x3c010800, 0xac221b54, ++0x24020b78, 0x3c010800, 0xac221b64, 0x34630002, ++0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, ++0xa0221b68, 0x304200ff, 0x24030002, 0x14430005, ++0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8, ++0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, ++0x8f624434, 0x8f634438, 0x8f644410, 0x3c010800, ++0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800, ++0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, ++0x27bd0018, 0x3c040800, 0x8c870000, 0x3c03aa55, ++0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000, ++0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, ++0xac830000, 0x8cc20000, 0x50430001, 0x24050001, ++0x3c020800, 0xac470000, 0x03e00008, 0x00a01021, ++0x27bdfff8, 0x18800009, 
0x00002821, 0x8f63680c, ++0x8f62680c, 0x1043fffe, 0x00000000, 0x24a50001, ++0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, ++0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, ++0x00031c02, 0x0043102b, 0x14400008, 0x3c038000, ++0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02, ++0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, ++0x8f624444, 0x00431024, 0x1440fffd, 0x00000000, ++0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, ++0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, ++0x0a000648, 0x2402ffff, 0x00822025, 0xaf645c38, ++0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, ++0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, ++0x8c631b58, 0x0a000651, 0x3042ffff, 0x8f624450, ++0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, ++0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, ++0x3c040800, 0x24841af0, 0x00003021, 0x00003821, ++0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, ++0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, ++0x27bd0020, 0x00000000, 0x00000000, 0x00000000, ++0x3c020800, 0x34423000, 0x3c030800, 0x34633000, ++0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, ++0x24020040, 0x3c010800, 0xac221b78, 0x3c010800, ++0xac201b70, 0xac600000, 0x24630004, 0x0083102b, ++0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, ++0x00804821, 0x8faa0010, 0x3c020800, 0x8c421b70, ++0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001, ++0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, ++0x00004021, 0x3c010800, 0xac201b70, 0x3c020800, ++0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000, ++0x00021140, 0x00431021, 0x00481021, 0x25080001, ++0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, ++0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, ++0x8f64680c, 0x00021140, 0x00431021, 0xac440008, ++0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, ++0x03e00008, 0xac4b001c, 0x00000000, 0x00000000, ++0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, ++0x43707541, 0x00000000, 0x00000000, 0x00000000, ++0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f, ++0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x66617461, 0x6c457272, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, ++0x362e3000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++}; ++ ++static const struct tg3_firmware tg3_lgcy_tso_fw = { ++ .size = TG3_LGCY_TSO_FW_LEN, ++ .data = (u8 *)&tg3TsoFwText[0], ++}; ++ ++/* 5705 needs a special version of the TSO firmware. 
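++ * The legacy image above is linked at 0x08000000, while the 5705 image
++ * below is linked at 0x00010000 and is roughly half the size (text length
++ * 0xe90 vs 0x1aa0). Both blobs start with the same three-word header: a
++ * word that appears to encode the release (0x00010600 ~ v1.6.0,
++ * 0x00010200 ~ v1.2.0), the text load address, and the total image length.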
*/ ++#define TG3_TSO5_FW_RELEASE_MAJOR 0x1 ++#define TG3_TSO5_FW_RELASE_MINOR 0x2 ++#define TG3_TSO5_FW_RELEASE_FIX 0x0 ++#define TG3_TSO5_FW_START_ADDR 0x00010000 ++#define TG3_TSO5_FW_TEXT_ADDR 0x00010000 ++#define TG3_TSO5_FW_TEXT_LEN 0xe90 ++#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90 ++#define TG3_TSO5_FW_RODATA_LEN 0x50 ++#define TG3_TSO5_FW_DATA_ADDR 0x00010f00 ++#define TG3_TSO5_FW_DATA_LEN 0x20 ++#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20 ++#define TG3_TSO5_FW_SBSS_LEN 0x28 ++#define TG3_TSO5_FW_BSS_ADDR 0x00010f50 ++#define TG3_TSO5_FW_BSS_LEN 0x88 ++ ++#define TG3_5705_TSO_FW_LEN \ ++ (TG3_TSO5_FW_TEXT_LEN + \ ++ TG3_TSO5_FW_RODATA_LEN + \ ++ 0x20 + \ ++ TG3_TSO5_FW_DATA_LEN) ++ ++static const u32 tg3Tso5FwText[] = { ++0x00010200, (u32)TG3_TSO5_FW_TEXT_ADDR, (u32)TG3_5705_TSO_FW_LEN, ++0x0c004003, 0x00000000, 0x00010f04, 0x00000000, ++0x10000003, 0x00000000, 0x0000000d, 0x0000000d, ++0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001, ++0x26100000, 0x0c004010, 0x00000000, 0x0000000d, ++0x27bdffe0, 0x3c04fefe, 0xafbf0018, 0x0c0042e8, ++0x34840002, 0x0c004364, 0x00000000, 0x3c030001, ++0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, ++0x14620003, 0x24050001, 0x3c040001, 0x24840e90, ++0x24060002, 0x00003821, 0xafa00010, 0x0c004378, ++0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, ++0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, ++0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014, ++0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, ++0x8f706810, 0x32020400, 0x10400007, 0x00000000, ++0x8f641008, 0x00921024, 0x14400003, 0x00000000, ++0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, ++0x10510003, 0x32020200, 0x1040fff1, 0x00000000, ++0x0c0041b4, 0x00000000, 0x08004034, 0x00000000, ++0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, ++0x03e00008, 0x27bd0020, 0x27bdffe0, 0x3c040001, ++0x24840eb0, 0x00002821, 0x00003021, 0x00003821, ++0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, ++0x0000d021, 0x24020130, 0xaf625000, 0x3c010001, ++0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, ++0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, ++0x3c030001, 0x24630f60, 0x90620000, 0x27bdfff0, ++0x14400003, 0x0080c021, 0x08004073, 0x00004821, ++0x3c022000, 0x03021024, 0x10400003, 0x24090002, ++0x08004073, 0xa0600000, 0x24090001, 0x00181040, ++0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028, ++0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, ++0x24a50f7a, 0x00041402, 0xa0a20000, 0x3c010001, ++0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014, ++0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, ++0x8d8c8018, 0x304200ff, 0x24420008, 0x000220c3, ++0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b, ++0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, ++0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, ++0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8, ++0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, ++0x91060000, 0x3c020001, 0x90420f7c, 0x2503000d, ++0x00c32821, 0x00461023, 0x00021fc2, 0x00431021, ++0x00021043, 0x1840000c, 0x00002021, 0x91020001, ++0x00461023, 0x00021fc2, 0x00431021, 0x00021843, ++0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a, ++0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, ++0x00622021, 0x00041402, 0x00822021, 0x3c02ffff, ++0x01821024, 0x3083ffff, 0x00431025, 0x3c010001, ++0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, ++0x90a20000, 0x3c0c0001, 0x01836021, 0x8d8c8018, ++0x000220c2, 0x1080000e, 0x00003821, 0x01603021, ++0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, ++0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, ++0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c, ++0x90a20000, 0x30430007, 0x24020004, 0x10620011, 
++0x28620005, 0x10400005, 0x24020002, 0x10620008, ++0x000710c0, 0x080040fa, 0x00000000, 0x24020006, ++0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, ++0x00a21821, 0x9463000c, 0x004b1021, 0x080040fa, ++0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c, ++0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, ++0x8c63000c, 0x004b2021, 0x00a21021, 0xac830000, ++0x94420010, 0xa4820004, 0x95e70006, 0x3c020001, ++0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, ++0x3c020001, 0x90420f7b, 0x24630028, 0x01e34021, ++0x24420028, 0x15200012, 0x01e23021, 0x94c2000c, ++0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, ++0x3c010001, 0xa4200f76, 0x3c010001, 0xa4200f72, ++0x00021400, 0x00431025, 0x3c010001, 0xac220f6c, ++0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, ++0x3c020001, 0x94420f70, 0x3c030001, 0x94630f72, ++0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c, ++0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, ++0x3c040001, 0x94840f72, 0x3c020001, 0x94420f70, ++0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff, ++0x0062182a, 0x24020002, 0x1122000b, 0x00832023, ++0x3c030001, 0x94630f78, 0x30620009, 0x10400006, ++0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78, ++0x30420009, 0x01425023, 0x24020001, 0x1122001b, ++0x29220002, 0x50400005, 0x24020002, 0x11200007, ++0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d, ++0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, ++0x95ce0f80, 0x10800005, 0x01806821, 0x01c42021, ++0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027, ++0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, ++0x00e21021, 0x0800418d, 0x00432023, 0x3c020001, ++0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff, ++0x00622021, 0x00807021, 0x00041027, 0x08004185, ++0xa502000a, 0x3c050001, 0x24a50f7a, 0x90a30000, ++0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000, ++0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, ++0x3c020001, 0x94420f5a, 0x30e5ffff, 0x00641821, ++0x00451023, 0x00622023, 0x00041c02, 0x3082ffff, ++0x00622021, 0x00041027, 0xa502000a, 0x3c030001, ++0x90630f7c, 0x24620001, 0x14a20005, 0x00807021, ++0x01631021, 0x90420000, 0x08004185, 0x00026200, ++0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, ++0x944c0000, 0x3c020001, 0x94420f82, 0x3183ffff, ++0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021, ++0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, ++0x00622021, 0x00041402, 0x00822021, 0x00806821, ++0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00, ++0x00431025, 0x3c040001, 0x24840f72, 0xade20010, ++0x94820000, 0x3c050001, 0x94a50f76, 0x3c030001, ++0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000, ++0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, ++0xa4250f76, 0x10600003, 0x24a2ffff, 0x3c010001, ++0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001, ++0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, ++0x3c030001, 0x90630f56, 0x27bdffe8, 0x24020001, ++0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4, ++0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, ++0x3c010001, 0xac230f64, 0x8c434008, 0x24444000, ++0x8c5c4004, 0x30620040, 0x14400002, 0x24020088, ++0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, ++0x10400005, 0x24020001, 0x3c010001, 0xa0220f57, ++0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57, ++0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, ++0x24020001, 0x3c010001, 0xa4200f50, 0x3c010001, ++0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001, ++0x1342001e, 0x00000000, 0x13400005, 0x24020003, ++0x13420067, 0x00000000, 0x080042cf, 0x00000000, ++0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001, ++0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, ++0x00021bc2, 0x00031823, 0x3063003e, 0x34630036, ++0x00021242, 0x3042003c, 0x00621821, 0x3c010001, 
++0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, ++0xa4240f5a, 0x3c010001, 0xa4230f5c, 0x3c060001, ++0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001, ++0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, ++0x3c108000, 0x00a31021, 0xa4c20000, 0x3c02a000, ++0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, ++0x00901024, 0x14400003, 0x00000000, 0x0c004064, ++0x00000000, 0x8f620cf4, 0x00501024, 0x104000b7, ++0x00000000, 0x0800420f, 0x00000000, 0x3c030001, ++0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, ++0x3042ffff, 0x3c010001, 0xa4230f50, 0xaf620ce8, ++0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec, ++0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, ++0x3c028000, 0x3c108000, 0x3c02a000, 0xaf620cf4, ++0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024, ++0x14400003, 0x00000000, 0x0c004064, 0x00000000, ++0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, ++0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000, ++0x8f641008, 0x00901024, 0x14400003, 0x00000000, ++0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, ++0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, ++0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, ++0xaf620ce0, 0x3c020001, 0x8c420f64, 0xaf620ce4, ++0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001, ++0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, ++0x00822023, 0x30a6ffff, 0x3083ffff, 0x00c3102b, ++0x14400043, 0x00000000, 0x3c020001, 0x94420f5c, ++0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, ++0x3c030001, 0x94630f54, 0x00441021, 0xa4e20000, ++0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001, ++0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, ++0x94420f68, 0x34630624, 0x0800427c, 0x0000d021, ++0x3c020001, 0x94420f68, 0x3c030008, 0x34630624, ++0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, ++0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, ++0x00901024, 0x14400003, 0x00000000, 0x0c004064, ++0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, ++0x00000000, 0x08004283, 0x00000000, 0x3c030001, ++0x94630f68, 0x34420624, 0x3c108000, 0x00621825, ++0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, ++0x00901024, 0x14400003, 0x00000000, 0x0c004064, ++0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, ++0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, ++0x3c020001, 0x94420f5c, 0x00021400, 0x00c21025, ++0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009, ++0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, ++0x0000d021, 0x00431025, 0xaf620cec, 0x080042c1, ++0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008, ++0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, ++0x94420f5e, 0x00451021, 0x3c010001, 0xa4220f5e, ++0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001, ++0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, ++0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, ++0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014, ++0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, ++0x27bdffe0, 0x3c040001, 0x24840ec0, 0x00002821, ++0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, ++0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, ++0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, ++0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020, ++0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, ++0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, ++0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20, ++0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, ++0xaf634000, 0x0c004315, 0x00808021, 0x3c010001, ++0xa0220f34, 0x304200ff, 0x24030002, 0x14430005, ++0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, ++0xac5000c0, 0x3c020001, 0x8c420f20, 0xac5000bc, ++0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001, ++0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, ++0xac240f24, 0x8fbf0014, 0x8fb00010, 0x03e00008, 
++0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8, ++0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, ++0x1043fffe, 0x00000000, 0x24a50001, 0x00a4102a, ++0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008, ++0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, ++0x0043102b, 0x14400008, 0x3c038000, 0x3c040001, ++0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b, ++0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, ++0x00431024, 0x1440fffd, 0x00000000, 0x8f624448, ++0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000, ++0x2c422001, 0x14400003, 0x3c024000, 0x08004347, ++0x2402ffff, 0x00822025, 0xaf645c38, 0x8f625c30, ++0x30420002, 0x1440fffc, 0x00001021, 0x03e00008, ++0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, ++0x08004350, 0x3042ffff, 0x8f624450, 0x3042ffff, ++0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008, ++0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, ++0x24840ed0, 0x00003021, 0x00003821, 0xafbf0018, ++0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f, ++0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, ++0x3c020001, 0x3442d600, 0x3c030001, 0x3463d600, ++0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40, ++0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, ++0xac200f3c, 0xac600000, 0x24630004, 0x0083102b, ++0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, ++0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, ++0x3c040001, 0x8c840f44, 0x8fab0014, 0x24430001, ++0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003, ++0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, ++0x8c420f3c, 0x3c030001, 0x8c630f40, 0x91240000, ++0x00021140, 0x00431021, 0x00481021, 0x25080001, ++0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, ++0x3c020001, 0x8c420f3c, 0x3c030001, 0x8c630f40, ++0x8f64680c, 0x00021140, 0x00431021, 0xac440008, ++0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, ++0x03e00008, 0xac4b001c, 0x00000000, 0x00000000, ++0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, ++0x43707541, 0x00000000, 0x00000000, 0x00000000, ++0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, ++0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, ++0x66617461, 0x6c457272, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x00000000, 0x00000000, 0x00000000, ++0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, ++0x322e3000, 0x00000000, 0x00000000, 0x00000000, ++}; ++ ++static const struct tg3_firmware tg3_5705_tso_fw = { ++ .size = TG3_5705_TSO_FW_LEN, ++ .data = (u8 *)&tg3Tso5FwText[0], ++}; ++ ++#endif /* TG3_TSO_SUPPORT != 0 */ ++ ++static int tg3_hidden_request_firmware(const struct tg3_firmware **fw, ++ const char *name) ++{ ++ *fw = 0; ++ ++ if (strcmp(name, "tigon/tg3.bin") == 0) ++ *fw = &tg3_5701_fw; ++ else if (strcmp(name, "tigon/tg357766.bin") == 0) ++ *fw = &tg3_57766_fw; ++#if TG3_TSO_SUPPORT != 0 ++ else if (strcmp(name, "tigon/tg3_tso.bin") == 0) ++ *fw = &tg3_lgcy_tso_fw; ++ else if (strcmp(name, "tigon/tg3_tso5.bin") == 0) ++ *fw = &tg3_5705_tso_fw; ++#endif ++ ++ return *fw ? 
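++	       /* 0 when a built-in image matched the requested name,
++	        * -EINVAL when no fallback image exists for it */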
0 : -EINVAL; ++} ++ ++#define tg3_priv_request_firmware(x, y, z) tg3_hidden_request_firmware((x), (y)) ++ ++#define tg3_priv_release_firmware(x) ++ ++#endif /* BCM_HAS_REQUEST_FIRMWARE */ +diff --git a/drivers/net/ethernet/broadcom/tg3/tg3_flags.h b/drivers/net/ethernet/broadcom/tg3/tg3_flags.h +new file mode 100644 +index 0000000..6788434 +--- /dev/null ++++ b/drivers/net/ethernet/broadcom/tg3/tg3_flags.h +@@ -0,0 +1,95 @@ ++#define BCM_HAS_BOOL ++#define BCM_HAS_LE32 ++#define BCM_HAS_RESOURCE_SIZE_T ++#define BCM_HAS_KZALLOC ++#define BCM_HAS_JIFFIES_TO_USECS ++#define BCM_HAS_USECS_TO_JIFFIES ++#define BCM_HAS_MSECS_TO_JIFFIES ++#define BCM_HAS_MSLEEP ++#define BCM_HAS_MSLEEP_INTERRUPTIBLE ++#define BCM_HAS_SKB_COPY_FROM_LINEAR_DATA ++#define BCM_HAS_SKB_IS_GSO_V6 ++#define BCM_HAS_SKB_CHECKSUM_NONE_ASSERT ++#define BCM_KERNEL_SUPPORTS_TIMESTAMPING ++#define BCM_HAS_SKB_TX_TIMESTAMP ++#define BCM_HAS_SKB_FRAG_SIZE ++#define BCM_HAS_SKB_FRAG_DMA_MAP ++#define BCM_HAS_PCI_PCIE_CAP ++#define BCM_HAS_PCIE_CAP_RW ++#define BCM_HAS_PCI_IS_PCIE ++#define BCM_HAS_PCI_IOREMAP_BAR ++#define BCM_HAS_PCI_READ_VPD ++#define BCM_HAS_INTX_MSI_WORKAROUND ++#define BCM_HAS_PCI_TARGET_STATE ++#define BCM_HAS_PCI_CHOOSE_STATE ++#define BCM_HAS_PCI_PME_CAPABLE ++#define BCM_HAS_PCI_ENABLE_WAKE ++#define BCM_HAS_PCI_WAKE_FROM_D3 ++#define BCM_HAS_PCI_SET_POWER_STATE ++#define BCM_HAS_PCI_EEH_SUPPORT ++#define BCM_HAS_PCI_IS_ENABLED ++#define BCM_HAS_DEVICE_WAKEUP_API ++#define BCM_HAS_DEVICE_SET_WAKEUP_CAPABLE ++#define BCM_HAS_NEW_PCI_DMA_MAPPING_ERROR ++#define BCM_HAS_PCIE_GET_READRQ ++#define BCM_HAS_PCIE_SET_READRQ ++#define BCM_HAS_ETHTOOL_OP_SET_TX_IPV6_CSUM ++#define BCM_HAS_ETHTOOL_OP_SET_TX_HW_CSUM ++#define BCM_HAS_ETHTOOL_OP_SET_SG ++#define BCM_HAS_ETHTOOL_OP_SET_TSO ++#define BCM_HAS_MDIX_STATUS ++#define BCM_HAS_SET_PHYS_ID ++#define BCM_HAS_SET_TX_CSUM ++#define BCM_HAS_ETHTOOL_CMD_SPEED_SET ++#define BCM_HAS_ETHTOOL_CMD_SPEED ++#define BCM_HAS_EXTERNAL_LB_DONE ++#define BCM_HAS_GET_RXNFC ++#define BCM_HAS_GET_RXFH_INDIR ++#define BCM_HAS_LP_ADVERTISING ++#define BCM_HAS_SKB_TRANSPORT_OFFSET ++#define BCM_HAS_SKB_GET_QUEUE_MAPPING ++#define BCM_HAS_IP_HDR ++#define BCM_HAS_IP_HDRLEN ++#define BCM_HAS_TCP_HDR ++#define BCM_HAS_TCP_HDRLEN ++#define BCM_HAS_TCP_OPTLEN ++#define BCM_HAS_STRUCT_NETDEV_QUEUE ++#define BCM_HAS_NETIF_SET_REAL_NUM_TX_QUEUES ++#define BCM_HAS_NETIF_SET_REAL_NUM_RX_QUEUES ++#define BCM_HAS_NETDEV_PRIV ++#define BCM_HAS_NETDEV_TX_T ++#define BCM_HAS_NETDEV_HW_ADDR ++#define BCM_HAS_NETDEV_NAME ++#define BCM_HAS_NETDEV_SENT_QUEUE ++#define BCM_HAS_NETDEV_TX_SENT_QUEUE ++#define BCM_HAS_NETDEV_COMPLETED_QUEUE ++#define BCM_HAS_NETDEV_TX_COMPLETED_QUEUE ++#define BCM_HAS_NETDEV_RESET_QUEUE ++#define BCM_HAS_NETDEV_TX_RESET_QUEUE ++#define BCM_HAS_NET_DEVICE_OPS ++#define BCM_HAS_GET_STATS64 ++#define BCM_HAS_FIX_FEATURES ++#define BCM_HAS_HW_FEATURES ++#define BCM_HAS_VLAN_FEATURES ++#define BCM_HAS_NETDEV_UPDATE_FEATURES ++#define BCM_HAS_ALLOC_ETHERDEV_MQ ++#define BCM_HAS_NAPI_GRO_RECEIVE ++#define BCM_HAS_NETIF_TX_LOCK ++#define BCM_HAS_TXQ_TRANS_UPDATE ++#define BCM_HAS_NETDEV_FEATURES_T ++#define BCM_HAS_NEW_VLAN_INTERFACE ++#define BCM_HAS_DEV_DRIVER_STRING ++#define BCM_HAS_DEV_NAME ++#define BCM_HAS_MDIO_H ++#define BCM_HAS_MII_RESOLVE_FLOWCTRL_FDX ++#define BCM_HAS_MII_ADVERTISE_FLOWCTRL ++#define BCM_HAS_MDIOBUS_ALLOC ++#define BCM_HAS_DMA_DATA_DIRECTION ++#define BCM_HAS_DMA_UNMAP_ADDR ++#define BCM_HAS_DMA_UNMAP_ADDR_SET ++#define BCM_HAS_DMA_ZALLOC_COHERENT 
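++/* Each BCM_HAS_* define in this header advertises a capability of the
++ * 3.2.65 target kernel, and tg3.c keys its compatibility #ifdef blocks
++ * off of them. Note that BCM_HAS_REQUEST_FIRMWARE is deliberately (or at
++ * least effectively) absent, so the driver falls back to the built-in
++ * firmware images above. */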
++#define BCM_HAS_IEEE1588_SUPPORT
++#define BCM_HAS_PCI_PMOPS_SHUTDOWN
++#define BCM_HAS_OLD_RXFH_INDIR
++#define BCM_HAS_PCI_CHANNEL_OFFLINE
++#define BCM_HAS_PCI_CHANNEL_IO_NORMAL_ENUM
diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/mgmt-port-init-config.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/mgmt-port-init-config.patch
new file mode 100644
index 00000000..23546620
--- /dev/null
+++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/mgmt-port-init-config.patch
@@ -0,0 +1,50 @@
+diff --git a/drivers/net/ethernet/broadcom/tg3/tg3.c b/drivers/net/ethernet/broadcom/tg3/tg3.c
+index 4894a11..9b7b7b4 100644
+--- a/drivers/net/ethernet/broadcom/tg3/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3/tg3.c
+@@ -561,6 +561,7 @@ static const struct {
+ 
+ #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
+ 
++static int as7716 = -1; /* as7716=1: running on an Accton AS7716 switch, which needs platform-specific PHY setup */
+ 
+ static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
+ {
+@@ -1628,6 +1629,10 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
+ static void tg3_mdio_start(struct tg3 *tp)
+ {
+ 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
++
++	if (as7716 == 1)
++		tp->mi_mode |= MAC_MI_MODE_SHORT_PREAMBLE; /* as7716: needed to access the external PHY (0x1F), a BCM54616S */
++
+ 	tw32_f(MAC_MI_MODE, tp->mi_mode);
+ 	udelay(80);
+ 
+@@ -2899,6 +2904,11 @@ static int tg3_phy_reset(struct tg3 *tp)
+ 		}
+ 	}
+ 
++	if (as7716 == 1 && tp->phy_id == TG3_PHY_ID_BCM5718S) {
++		__tg3_writephy(tp, 0x8, 0x10, 0x1d0); /* as7716: configure internal PHY 0x8 so the link can come up */
++		__tg3_writephy(tp, 0x1f, 0x4, 0x5e1); /* as7716: enable 10/100 capability of the external PHY BCM54616S */
++	}
++
+ 	if (tg3_flag(tp, 5717_PLUS) &&
+ 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
+ 		return 0;
+@@ -19874,6 +19884,14 @@ static struct pci_driver tg3_driver = {
+ 
+ static int __init tg3_init(void)
+ {
++	extern int platform_accton_as7716_32x(void);
++	if (platform_accton_as7716_32x()) {
++		as7716 = 1;
++		printk_once(KERN_INFO "\nAS7716-32X\n");
++	}
++	else
++		as7716 = 0;
++
+ #ifdef TG3_VMWARE_NETQ_ENABLE
+ 	int i;
+ 	for (i = 0; i < TG3_MAX_NIC; i++) {
diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch
new file mode 100644
index 00000000..73ea06b2
--- /dev/null
+++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch
@@ -0,0 +1,1707 @@
+Device driver patches for accton as7716-32x (fan/psu/cpld/led/sfp)
+
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 89c619d..42abae5 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -1573,7 +1573,25 @@ config SENSORS_ACCTON_AS5812_54t_PSU
+ 
+ 	  This driver can also be built as a module. If so, the module will
+ 	  be called accton_as5812_54t_psu.
+-
++
++config SENSORS_ACCTON_AS7716_32x_FAN
++	tristate "Accton as7716 32x fan"
++	depends on I2C && SENSORS_ACCTON_I2C_CPLD
++	help
++	  If you say yes here you get support for the Accton as7716 32x fan.
++
++	  This driver can also be built as a module. If so, the module will
++	  be called accton_as7716_32x_fan.
++
++config SENSORS_ACCTON_AS7716_32x_PSU
++	tristate "Accton as7716 32x psu"
++	depends on I2C && SENSORS_ACCTON_I2C_CPLD
++	help
++	  If you say yes here you get support for the Accton as7716 32x psu.
++
++	  This driver can also be built as a module. If so, the module will
++	  be called accton_as7716_32x_psu.
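++
++# Both AS7716-32X entries depend on SENSORS_ACCTON_I2C_CPLD: the drivers
++# read fan/PSU state through accton_i2c_cpld_read() and only load after
++# platform_accton_as7716_32x() confirms the platform via DMI.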
++ + if ACPI + + comment "ACPI drivers" +diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile +index de922bc..9210ab0 100644 +--- a/drivers/hwmon/Makefile ++++ b/drivers/hwmon/Makefile +@@ -36,6 +36,8 @@ obj-$(CONFIG_SENSORS_ACCTON_AS6812_32x_FAN) += accton_as6812_32x_fan.o + obj-$(CONFIG_SENSORS_ACCTON_AS6812_32x_PSU) += accton_as6812_32x_psu.o + obj-$(CONFIG_SENSORS_ACCTON_AS5812_54t_FAN) += accton_as5812_54t_fan.o + obj-$(CONFIG_SENSORS_ACCTON_AS5812_54t_PSU) += accton_as5812_54t_psu.o ++obj-$(CONFIG_SENSORS_ACCTON_AS7716_32x_FAN) += accton_as7716_32x_fan.o ++obj-$(CONFIG_SENSORS_ACCTON_AS7716_32x_PSU) += accton_as7716_32x_psu.o + obj-$(CONFIG_SENSORS_AD7314) += ad7314.o + obj-$(CONFIG_SENSORS_AD7414) += ad7414.o + obj-$(CONFIG_SENSORS_AD7418) += ad7418.o +diff --git a/drivers/hwmon/accton_as7716_32x_fan.c b/drivers/hwmon/accton_as7716_32x_fan.c +new file mode 100644 +index 0000000..924374c +--- /dev/null ++++ b/drivers/hwmon/accton_as7716_32x_fan.c +@@ -0,0 +1,452 @@ ++/* ++ * A hwmon driver for the Accton as7716 32x fan ++ * ++ * Copyright (C) 2014 Accton Technology Corporation. ++ * Brandon Chuang ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
++ */
++
++#include <linux/module.h>
++#include <linux/jiffies.h>
++#include <linux/i2c.h>
++#include <linux/hwmon.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/err.h>
++#include <linux/mutex.h>
++#include <linux/sysfs.h>
++#include <linux/slab.h>
++#include <linux/dmi.h>
++
++#define DRVNAME "as7716_32x_fan"
++
++static struct as7716_32x_fan_data *as7716_32x_fan_update_device(struct device *dev);
++static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, char *buf);
++static ssize_t set_duty_cycle(struct device *dev, struct device_attribute *da,
++            const char *buf, size_t count);
++extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg);
++extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value);
++
++/* fan related data, the index should match sysfs_fan_attributes
++ */
++static const u8 fan_reg[] = {
++    0x0F,       /* fan 1-6 present status */
++    0x11,       /* fan PWM (for all fans) */
++    0x12,       /* front fan 1 speed(rpm) */
++    0x13,       /* front fan 2 speed(rpm) */
++    0x14,       /* front fan 3 speed(rpm) */
++    0x15,       /* front fan 4 speed(rpm) */
++    0x16,       /* front fan 5 speed(rpm) */
++    0x17,       /* front fan 6 speed(rpm) */
++    0x22,       /* rear fan 1 speed(rpm) */
++    0x23,       /* rear fan 2 speed(rpm) */
++    0x24,       /* rear fan 3 speed(rpm) */
++    0x25,       /* rear fan 4 speed(rpm) */
++    0x26,       /* rear fan 5 speed(rpm) */
++    0x27,       /* rear fan 6 speed(rpm) */
++};
++
++/* Each client has this additional data */
++struct as7716_32x_fan_data {
++    struct device   *hwmon_dev;
++    struct mutex     update_lock;
++    char             valid;        /* != 0 if registers are valid */
++    unsigned long    last_updated; /* In jiffies */
++    u8               reg_val[ARRAY_SIZE(fan_reg)]; /* Register value */
++};
++
++enum fan_id {
++    FAN1_ID,
++    FAN2_ID,
++    FAN3_ID,
++    FAN4_ID,
++    FAN5_ID,
++    FAN6_ID
++};
++
++enum sysfs_fan_attributes {
++    FAN_PRESENT_REG,
++    FAN_DUTY_CYCLE_PERCENTAGE, /* Only one CPLD register to control duty cycle for all fans */
++    FAN1_FRONT_SPEED_RPM,
++    FAN2_FRONT_SPEED_RPM,
++    FAN3_FRONT_SPEED_RPM,
++    FAN4_FRONT_SPEED_RPM,
++    FAN5_FRONT_SPEED_RPM,
++    FAN6_FRONT_SPEED_RPM,
++    FAN1_REAR_SPEED_RPM,
++    FAN2_REAR_SPEED_RPM,
++    FAN3_REAR_SPEED_RPM,
++    FAN4_REAR_SPEED_RPM,
++    FAN5_REAR_SPEED_RPM,
++    FAN6_REAR_SPEED_RPM,
++    FAN1_PRESENT,
++    FAN2_PRESENT,
++    FAN3_PRESENT,
++    FAN4_PRESENT,
++    FAN5_PRESENT,
++    FAN6_PRESENT,
++    FAN1_FAULT,
++    FAN2_FAULT,
++    FAN3_FAULT,
++    FAN4_FAULT,
++    FAN5_FAULT,
++    FAN6_FAULT
++};
++
++/* Define attributes
++ */
++#define DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(index) \
++    static SENSOR_DEVICE_ATTR(fan##index##_fault, S_IRUGO, fan_show_value, NULL, FAN##index##_FAULT)
++#define DECLARE_FAN_FAULT_ATTR(index)      &sensor_dev_attr_fan##index##_fault.dev_attr.attr
++
++#define DECLARE_FAN_DIRECTION_SENSOR_DEV_ATTR(index) \
++    static SENSOR_DEVICE_ATTR(fan##index##_direction, S_IRUGO, fan_show_value, NULL, FAN##index##_DIRECTION)
++#define DECLARE_FAN_DIRECTION_ATTR(index)  &sensor_dev_attr_fan##index##_direction.dev_attr.attr
++
++#define DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(index) \
++    static SENSOR_DEVICE_ATTR(fan##index##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, set_duty_cycle, FAN##index##_DUTY_CYCLE_PERCENTAGE)
++#define DECLARE_FAN_DUTY_CYCLE_ATTR(index) &sensor_dev_attr_fan##index##_duty_cycle_percentage.dev_attr.attr
++
++#define DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(index) \
++    static SENSOR_DEVICE_ATTR(fan##index##_present, S_IRUGO, fan_show_value, NULL, FAN##index##_PRESENT)
++#define DECLARE_FAN_PRESENT_ATTR(index)    &sensor_dev_attr_fan##index##_present.dev_attr.attr
++
++#define DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(index) \
++    static SENSOR_DEVICE_ATTR(fan##index##_front_speed_rpm, S_IRUGO, fan_show_value, NULL,
FAN##index##_FRONT_SPEED_RPM);\ ++ static SENSOR_DEVICE_ATTR(fan##index##_rear_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##index##_REAR_SPEED_RPM) ++#define DECLARE_FAN_SPEED_RPM_ATTR(index) &sensor_dev_attr_fan##index##_front_speed_rpm.dev_attr.attr, \ ++ &sensor_dev_attr_fan##index##_rear_speed_rpm.dev_attr.attr ++ ++/* 6 fan fault attributes in this platform */ ++DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(1); ++DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(2); ++DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(3); ++DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(4); ++DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(5); ++DECLARE_FAN_FAULT_SENSOR_DEV_ATTR(6); ++/* 6 fan speed(rpm) attributes in this platform */ ++DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(1); ++DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(2); ++DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(3); ++DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(4); ++DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(5); ++DECLARE_FAN_SPEED_RPM_SENSOR_DEV_ATTR(6); ++/* 6 fan present attributes in this platform */ ++DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(1); ++DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(2); ++DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(3); ++DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(4); ++DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(5); ++DECLARE_FAN_PRESENT_SENSOR_DEV_ATTR(6); ++/* 1 fan duty cycle attribute in this platform */ ++DECLARE_FAN_DUTY_CYCLE_SENSOR_DEV_ATTR(); ++ ++static struct attribute *as7716_32x_fan_attributes[] = { ++ /* fan related attributes */ ++ DECLARE_FAN_FAULT_ATTR(1), ++ DECLARE_FAN_FAULT_ATTR(2), ++ DECLARE_FAN_FAULT_ATTR(3), ++ DECLARE_FAN_FAULT_ATTR(4), ++ DECLARE_FAN_FAULT_ATTR(5), ++ DECLARE_FAN_FAULT_ATTR(6), ++ DECLARE_FAN_SPEED_RPM_ATTR(1), ++ DECLARE_FAN_SPEED_RPM_ATTR(2), ++ DECLARE_FAN_SPEED_RPM_ATTR(3), ++ DECLARE_FAN_SPEED_RPM_ATTR(4), ++ DECLARE_FAN_SPEED_RPM_ATTR(5), ++ DECLARE_FAN_SPEED_RPM_ATTR(6), ++ DECLARE_FAN_PRESENT_ATTR(1), ++ DECLARE_FAN_PRESENT_ATTR(2), ++ DECLARE_FAN_PRESENT_ATTR(3), ++ DECLARE_FAN_PRESENT_ATTR(4), ++ DECLARE_FAN_PRESENT_ATTR(5), ++ DECLARE_FAN_PRESENT_ATTR(6), ++ DECLARE_FAN_DUTY_CYCLE_ATTR(), ++ NULL ++}; ++ ++#define FAN_DUTY_CYCLE_REG_MASK 0xF ++#define FAN_MAX_DUTY_CYCLE 100 ++#define FAN_REG_VAL_TO_SPEED_RPM_STEP 100 ++ ++static int as7716_32x_fan_read_value(struct i2c_client *client, u8 reg) ++{ ++ return i2c_smbus_read_byte_data(client, reg); ++} ++ ++static int as7716_32x_fan_write_value(struct i2c_client *client, u8 reg, u8 value) ++{ ++ return i2c_smbus_write_byte_data(client, reg, value); ++} ++ ++/* fan utility functions ++ */ ++static u32 reg_val_to_duty_cycle(u8 reg_val) ++{ ++ reg_val &= FAN_DUTY_CYCLE_REG_MASK; ++ return ((u32)(reg_val+1) * 625 + 75)/ 100; ++} ++ ++static u8 duty_cycle_to_reg_val(u8 duty_cycle) ++{ ++ return ((u32)duty_cycle * 100 / 625) - 1; ++} ++ ++static u32 reg_val_to_speed_rpm(u8 reg_val) ++{ ++ return (u32)reg_val * FAN_REG_VAL_TO_SPEED_RPM_STEP; ++} ++ ++static u8 reg_val_to_is_present(u8 reg_val, enum fan_id id) ++{ ++ u8 mask = (1 << id); ++ ++ reg_val &= mask; ++ ++ return reg_val ? 
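++	       /* present bit is active-low in the CPLD register:
++	        * 0 means the fan is installed */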
0 : 1; ++} ++ ++static u8 is_fan_fault(struct as7716_32x_fan_data *data, enum fan_id id) ++{ ++ u8 ret = 1; ++ int front_fan_index = FAN1_FRONT_SPEED_RPM + id; ++ int rear_fan_index = FAN1_REAR_SPEED_RPM + id; ++ ++ /* Check if the speed of front or rear fan is ZERO, ++ */ ++ if (reg_val_to_speed_rpm(data->reg_val[front_fan_index]) && ++ reg_val_to_speed_rpm(data->reg_val[rear_fan_index])) { ++ ret = 0; ++ } ++ ++ return ret; ++} ++ ++static ssize_t set_duty_cycle(struct device *dev, struct device_attribute *da, ++ const char *buf, size_t count) ++{ ++ int error, value; ++ struct i2c_client *client = to_i2c_client(dev); ++ ++ error = kstrtoint(buf, 10, &value); ++ if (error) ++ return error; ++ ++ if (value < 0 || value > FAN_MAX_DUTY_CYCLE) ++ return -EINVAL; ++ ++ as7716_32x_fan_write_value(client, 0x33, 0); /* Disable fan speed watch dog */ ++ as7716_32x_fan_write_value(client, fan_reg[FAN_DUTY_CYCLE_PERCENTAGE], duty_cycle_to_reg_val(value)); ++ return count; ++} ++ ++static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, ++ char *buf) ++{ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); ++ struct as7716_32x_fan_data *data = as7716_32x_fan_update_device(dev); ++ ssize_t ret = 0; ++ ++ if (data->valid) { ++ switch (attr->index) { ++ case FAN_DUTY_CYCLE_PERCENTAGE: ++ { ++ u32 duty_cycle = reg_val_to_duty_cycle(data->reg_val[FAN_DUTY_CYCLE_PERCENTAGE]); ++ ret = sprintf(buf, "%u\n", duty_cycle); ++ break; ++ } ++ case FAN1_FRONT_SPEED_RPM: ++ case FAN2_FRONT_SPEED_RPM: ++ case FAN3_FRONT_SPEED_RPM: ++ case FAN4_FRONT_SPEED_RPM: ++ case FAN5_FRONT_SPEED_RPM: ++ case FAN6_FRONT_SPEED_RPM: ++ case FAN1_REAR_SPEED_RPM: ++ case FAN2_REAR_SPEED_RPM: ++ case FAN3_REAR_SPEED_RPM: ++ case FAN4_REAR_SPEED_RPM: ++ case FAN5_REAR_SPEED_RPM: ++ case FAN6_REAR_SPEED_RPM: ++ ret = sprintf(buf, "%u\n", reg_val_to_speed_rpm(data->reg_val[attr->index])); ++ break; ++ case FAN1_PRESENT: ++ case FAN2_PRESENT: ++ case FAN3_PRESENT: ++ case FAN4_PRESENT: ++ case FAN5_PRESENT: ++ case FAN6_PRESENT: ++ ret = sprintf(buf, "%d\n", ++ reg_val_to_is_present(data->reg_val[FAN_PRESENT_REG], ++ attr->index - FAN1_PRESENT)); ++ break; ++ case FAN1_FAULT: ++ case FAN2_FAULT: ++ case FAN3_FAULT: ++ case FAN4_FAULT: ++ case FAN5_FAULT: ++ case FAN6_FAULT: ++ ret = sprintf(buf, "%d\n", is_fan_fault(data, attr->index - FAN1_FAULT)); ++ break; ++ default: ++ break; ++ } ++ } ++ ++ return ret; ++} ++ ++static const struct attribute_group as7716_32x_fan_group = { ++ .attrs = as7716_32x_fan_attributes, ++}; ++ ++static struct as7716_32x_fan_data *as7716_32x_fan_update_device(struct device *dev) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct as7716_32x_fan_data *data = i2c_get_clientdata(client); ++ ++ mutex_lock(&data->update_lock); ++ ++ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || ++ !data->valid) { ++ int i; ++ ++ dev_dbg(&client->dev, "Starting as7716_32x_fan update\n"); ++ data->valid = 0; ++ ++ /* Update fan data ++ */ ++ for (i = 0; i < ARRAY_SIZE(data->reg_val); i++) { ++ int status = as7716_32x_fan_read_value(client, fan_reg[i]); ++ ++ if (status < 0) { ++ data->valid = 0; ++ mutex_unlock(&data->update_lock); ++ dev_dbg(&client->dev, "reg %d, err %d\n", fan_reg[i], status); ++ return data; ++ } ++ else { ++ data->reg_val[i] = status; ++ } ++ } ++ ++ data->last_updated = jiffies; ++ data->valid = 1; ++ } ++ ++ mutex_unlock(&data->update_lock); ++ ++ return data; ++} ++ ++static int as7716_32x_fan_probe(struct i2c_client *client, ++ const struct 
i2c_device_id *dev_id) ++{ ++ struct as7716_32x_fan_data *data; ++ int status; ++ ++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { ++ status = -EIO; ++ goto exit; ++ } ++ ++ data = kzalloc(sizeof(struct as7716_32x_fan_data), GFP_KERNEL); ++ if (!data) { ++ status = -ENOMEM; ++ goto exit; ++ } ++ ++ i2c_set_clientdata(client, data); ++ data->valid = 0; ++ mutex_init(&data->update_lock); ++ ++ dev_info(&client->dev, "chip found\n"); ++ ++ /* Register sysfs hooks */ ++ status = sysfs_create_group(&client->dev.kobj, &as7716_32x_fan_group); ++ if (status) { ++ goto exit_free; ++ } ++ ++ data->hwmon_dev = hwmon_device_register(&client->dev); ++ if (IS_ERR(data->hwmon_dev)) { ++ status = PTR_ERR(data->hwmon_dev); ++ goto exit_remove; ++ } ++ ++ dev_info(&client->dev, "%s: fan '%s'\n", ++ dev_name(data->hwmon_dev), client->name); ++ ++ return 0; ++ ++exit_remove: ++ sysfs_remove_group(&client->dev.kobj, &as7716_32x_fan_group); ++exit_free: ++ kfree(data); ++exit: ++ ++ return status; ++} ++ ++static int as7716_32x_fan_remove(struct i2c_client *client) ++{ ++ struct as7716_32x_fan_data *data = i2c_get_clientdata(client); ++ hwmon_device_unregister(data->hwmon_dev); ++ sysfs_remove_group(&client->dev.kobj, &as7716_32x_fan_group); ++ ++ return 0; ++} ++ ++/* Addresses to scan */ ++static const unsigned short normal_i2c[] = { 0x66, I2C_CLIENT_END }; ++ ++static const struct i2c_device_id as7716_32x_fan_id[] = { ++ { "as7716_32x_fan", 0 }, ++ {} ++}; ++MODULE_DEVICE_TABLE(i2c, as7716_32x_fan_id); ++ ++static struct i2c_driver as7716_32x_fan_driver = { ++ .class = I2C_CLASS_HWMON, ++ .driver = { ++ .name = DRVNAME, ++ }, ++ .probe = as7716_32x_fan_probe, ++ .remove = as7716_32x_fan_remove, ++ .id_table = as7716_32x_fan_id, ++ .address_list = normal_i2c, ++}; ++ ++static int __init as7716_32x_fan_init(void) ++{ ++ extern int platform_accton_as7716_32x(void); ++ if (!platform_accton_as7716_32x()) { ++ return -ENODEV; ++ } ++ ++ return i2c_add_driver(&as7716_32x_fan_driver); ++} ++ ++static void __exit as7716_32x_fan_exit(void) ++{ ++ i2c_del_driver(&as7716_32x_fan_driver); ++} ++ ++module_init(as7716_32x_fan_init); ++module_exit(as7716_32x_fan_exit); ++ ++MODULE_AUTHOR("Brandon Chuang "); ++MODULE_DESCRIPTION("as7716_32x_fan driver"); ++MODULE_LICENSE("GPL"); ++ +diff --git a/drivers/hwmon/accton_as7716_32x_psu.c b/drivers/hwmon/accton_as7716_32x_psu.c +new file mode 100644 +index 0000000..4fd15ae +--- /dev/null ++++ b/drivers/hwmon/accton_as7716_32x_psu.c +@@ -0,0 +1,293 @@ ++/* ++ * An hwmon driver for accton as7716_32x Power Module ++ * ++ * Copyright (C) 2014 Accton Technology Corporation. ++ * Brandon Chuang ++ * ++ * Based on ad7414.c ++ * Copyright 2006 Stefan Roese , DENX Software Engineering ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
++ */
++
++#include <linux/module.h>
++#include <linux/jiffies.h>
++#include <linux/i2c.h>
++#include <linux/hwmon.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/err.h>
++#include <linux/mutex.h>
++#include <linux/sysfs.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/dmi.h>
++
++static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf);
++static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf);
++static int as7716_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, int data_len);
++extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg);
++
++/* Addresses scanned
++ */
++static const unsigned short normal_i2c[] = { 0x50, 0x53, I2C_CLIENT_END };
++
++/* Each client has this additional data
++ */
++struct as7716_32x_psu_data {
++    struct device   *hwmon_dev;
++    struct mutex     update_lock;
++    char             valid;        /* != 0 if registers are valid */
++    unsigned long    last_updated; /* In jiffies */
++    u8   index;          /* PSU index */
++    u8   status;         /* Status(present/power_good) register read from CPLD */
++    char model_name[9];  /* Model name, read from eeprom */
++};
++
++static struct as7716_32x_psu_data *as7716_32x_psu_update_device(struct device *dev);
++
++enum as7716_32x_psu_sysfs_attributes {
++    PSU_PRESENT,
++    PSU_MODEL_NAME,
++    PSU_POWER_GOOD
++};
++
++/* sysfs attributes for hwmon
++ */
++static SENSOR_DEVICE_ATTR(psu_present,    S_IRUGO, show_status,     NULL, PSU_PRESENT);
++static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name, NULL, PSU_MODEL_NAME);
++static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status,     NULL, PSU_POWER_GOOD);
++
++static struct attribute *as7716_32x_psu_attributes[] = {
++    &sensor_dev_attr_psu_present.dev_attr.attr,
++    &sensor_dev_attr_psu_model_name.dev_attr.attr,
++    &sensor_dev_attr_psu_power_good.dev_attr.attr,
++    NULL
++};
++
++static ssize_t show_status(struct device *dev, struct device_attribute *da,
++            char *buf)
++{
++    struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
++    struct as7716_32x_psu_data *data = as7716_32x_psu_update_device(dev);
++    u8 status = 0;
++
++    if (attr->index == PSU_PRESENT) {
++        status = !(data->status >> (1-data->index) & 0x1);
++    }
++    else { /* PSU_POWER_GOOD */
++        status = (data->status >> (3-data->index) & 0x1);
++    }
++
++    return sprintf(buf, "%d\n", status);
++}
++
++static ssize_t show_model_name(struct device *dev, struct device_attribute *da,
++            char *buf)
++{
++    struct as7716_32x_psu_data *data = as7716_32x_psu_update_device(dev);
++
++    return sprintf(buf, "%s\n", data->model_name);
++}
++
++static const struct attribute_group as7716_32x_psu_group = {
++    .attrs = as7716_32x_psu_attributes,
++};
++
++static int as7716_32x_psu_probe(struct i2c_client *client,
++            const struct i2c_device_id *dev_id)
++{
++    struct as7716_32x_psu_data *data;
++    int status;
++
++    if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
++        status = -EIO;
++        goto exit;
++    }
++
++    data = kzalloc(sizeof(struct as7716_32x_psu_data), GFP_KERNEL);
++    if (!data) {
++        status = -ENOMEM;
++        goto exit;
++    }
++
++    i2c_set_clientdata(client, data);
++    data->valid = 0;
++    data->index = dev_id->driver_data;
++    mutex_init(&data->update_lock);
++
++    dev_info(&client->dev, "chip found\n");
++
++    /* Register sysfs hooks */
++    status = sysfs_create_group(&client->dev.kobj, &as7716_32x_psu_group);
++    if (status) {
++        goto exit_free;
++    }
++
++    data->hwmon_dev = hwmon_device_register(&client->dev);
++    if (IS_ERR(data->hwmon_dev)) {
++        status = PTR_ERR(data->hwmon_dev);
++        goto exit_remove;
++    }
++
++    dev_info(&client->dev, "%s: psu '%s'\n",
++         dev_name(data->hwmon_dev), client->name);
++
++    return 0;
++
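++/* Error unwind: release resources in the reverse order of setup */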
++exit_remove: ++ sysfs_remove_group(&client->dev.kobj, &as7716_32x_psu_group); ++exit_free: ++ kfree(data); ++exit: ++ ++ return status; ++} ++ ++static int as7716_32x_psu_remove(struct i2c_client *client) ++{ ++ struct as7716_32x_psu_data *data = i2c_get_clientdata(client); ++ ++ hwmon_device_unregister(data->hwmon_dev); ++ sysfs_remove_group(&client->dev.kobj, &as7716_32x_psu_group); ++ kfree(data); ++ ++ return 0; ++} ++ ++enum psu_index ++{ ++ as7716_32x_psu1, ++ as7716_32x_psu2 ++}; ++ ++static const struct i2c_device_id as7716_32x_psu_id[] = { ++ { "as7716_32x_psu1", as7716_32x_psu1 }, ++ { "as7716_32x_psu2", as7716_32x_psu2 }, ++ {} ++}; ++MODULE_DEVICE_TABLE(i2c, as7716_32x_psu_id); ++ ++static struct i2c_driver as7716_32x_psu_driver = { ++ .class = I2C_CLASS_HWMON, ++ .driver = { ++ .name = "as7716_32x_psu", ++ }, ++ .probe = as7716_32x_psu_probe, ++ .remove = as7716_32x_psu_remove, ++ .id_table = as7716_32x_psu_id, ++ .address_list = normal_i2c, ++}; ++ ++static int as7716_32x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, ++ int data_len) ++{ ++ int result = 0; ++ int retry_count = 5; ++ ++ while (retry_count) { ++ retry_count--; ++ ++ result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); ++ ++ if (unlikely(result < 0)) { ++ msleep(10); ++ continue; ++ } ++ ++ if (unlikely(result != data_len)) { ++ result = -EIO; ++ msleep(10); ++ continue; ++ } ++ ++ result = 0; ++ break; ++ } ++ ++ return result; ++} ++ ++static struct as7716_32x_psu_data *as7716_32x_psu_update_device(struct device *dev) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct as7716_32x_psu_data *data = i2c_get_clientdata(client); ++ ++ mutex_lock(&data->update_lock); ++ ++ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) ++ || !data->valid) { ++ int status; ++ int power_good = 0; ++ ++ dev_dbg(&client->dev, "Starting as7716_32x update\n"); ++ ++ /* Read psu status */ ++ status = accton_i2c_cpld_read(0x60, 0x2); ++ ++ if (status < 0) { ++ dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); ++ } ++ else { ++ data->status = status; ++ } ++ ++ /* Read model name */ ++ memset(data->model_name, 0, sizeof(data->model_name)); ++ power_good = (data->status >> (3-data->index) & 0x1); ++ ++ if (power_good) { ++ status = as7716_32x_psu_read_block(client, 0x20, data->model_name, ++ ARRAY_SIZE(data->model_name)-1); ++ ++ if (status < 0) { ++ data->model_name[0] = '\0'; ++ dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); ++ } ++ else { ++ data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; ++ } ++ } ++ ++ data->last_updated = jiffies; ++ data->valid = 1; ++ } ++ ++ mutex_unlock(&data->update_lock); ++ ++ return data; ++} ++ ++static int __init as7716_32x_psu_init(void) ++{ ++ extern int platform_accton_as7716_32x(void); ++ if (!platform_accton_as7716_32x()) { ++ return -ENODEV; ++ } ++ ++ return i2c_add_driver(&as7716_32x_psu_driver); ++} ++ ++static void __exit as7716_32x_psu_exit(void) ++{ ++ i2c_del_driver(&as7716_32x_psu_driver); ++} ++ ++module_init(as7716_32x_psu_init); ++module_exit(as7716_32x_psu_exit); ++ ++MODULE_AUTHOR("Brandon Chuang "); ++MODULE_DESCRIPTION("as7716_32x_psu driver"); ++MODULE_LICENSE("GPL"); ++ +diff --git a/drivers/hwmon/accton_i2c_cpld.c b/drivers/hwmon/accton_i2c_cpld.c +index acf88c9..95202ec 100644 +--- a/drivers/hwmon/accton_i2c_cpld.c ++++ b/drivers/hwmon/accton_i2c_cpld.c +@@ -255,6 +255,22 @@ int platform_accton_as5812_54t(void) + } + EXPORT_SYMBOL(platform_accton_as5812_54t); + ++static 
struct dmi_system_id as7716_dmi_table[] = {
++	{
++		.ident = "Accton AS7716",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Accton"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "AS7716"),
++		},
++	}, { } /* empty entry terminates the table for dmi_check_system() */
++};
++
++int platform_accton_as7716_32x(void)
++{
++	return dmi_check_system(as7716_dmi_table);
++}
++EXPORT_SYMBOL(platform_accton_as7716_32x);
++
+ MODULE_AUTHOR("Brandon Chuang ");
+ MODULE_DESCRIPTION("accton_i2c_cpld driver");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
+index 599b97b..bdfb18e 100644
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -88,7 +88,14 @@ config LEDS_ACCTON_AS5812_54t
+ 	help
+ 	  This option enables support for the LEDs on the Accton as5812 54t.
+ 	  Say Y to enable LEDs on the Accton as5812 54t.
+-
++
++config LEDS_ACCTON_AS7716_32x
++	tristate "LED support for the Accton as7716 32x"
++	depends on LEDS_CLASS && SENSORS_ACCTON_I2C_CPLD
++	help
++	  This option enables support for the LEDs on the Accton as7716 32x.
++	  Say Y to enable LEDs on the Accton as7716 32x.
++
+ config LEDS_LM3530
+ 	tristate "LCD Backlight driver for LM3530"
+ 	depends on LEDS_CLASS
+diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
+index bd20baa..58b1a80 100644
+--- a/drivers/leds/Makefile
++++ b/drivers/leds/Makefile
+@@ -50,7 +50,7 @@ obj-$(CONFIG_LEDS_ACCTON_AS7712_32x) += leds-accton_as7712_32x.o
+ obj-$(CONFIG_LEDS_ACCTON_AS5812_54x) += leds-accton_as5812_54x.o
+ obj-$(CONFIG_LEDS_ACCTON_AS6812_32x) += leds-accton_as6812_32x.o
+ obj-$(CONFIG_LEDS_ACCTON_AS5812_54t) += leds-accton_as5812_54t.o
+-
++obj-$(CONFIG_LEDS_ACCTON_AS7716_32x) += leds-accton_as7716_32x.o
+ # LED SPI Drivers
+ obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
+
+diff --git a/drivers/leds/leds-accton_as7716_32x.c b/drivers/leds/leds-accton_as7716_32x.c
+new file mode 100644
+index 0000000..5a84897
+--- /dev/null
++++ b/drivers/leds/leds-accton_as7716_32x.c
+@@ -0,0 +1,443 @@
++/*
++ * A LED driver for the accton_as7716_32x_led
++ *
++ * Copyright (C) 2014 Accton Technology Corporation.
++ * Brandon Chuang
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */ ++ ++/*#define DEBUG*/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++extern int accton_i2c_cpld_read (unsigned short cpld_addr, u8 reg); ++extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); ++ ++extern void led_classdev_unregister(struct led_classdev *led_cdev); ++extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); ++extern void led_classdev_resume(struct led_classdev *led_cdev); ++extern void led_classdev_suspend(struct led_classdev *led_cdev); ++ ++#define DRVNAME "accton_as7716_32x_led" ++ ++struct accton_as7716_32x_led_data { ++ struct platform_device *pdev; ++ struct mutex update_lock; ++ char valid; /* != 0 if registers are valid */ ++ unsigned long last_updated; /* In jiffies */ ++ u8 reg_val[1]; /* only 1 register*/ ++}; ++ ++static struct accton_as7716_32x_led_data *ledctl = NULL; ++ ++/* LED related data ++ */ ++ ++#define LED_CNTRLER_I2C_ADDRESS (0x60) ++ ++#define LED_TYPE_DIAG_REG_MASK (0x3) ++#define LED_MODE_DIAG_GREEN_VALUE (0x02) ++#define LED_MODE_DIAG_RED_VALUE (0x01) ++#define LED_MODE_DIAG_AMBER_VALUE (0x00) /*It's yellow actually. Green+Red=Yellow*/ ++#define LED_MODE_DIAG_OFF_VALUE (0x03) ++ ++ ++#define LED_TYPE_LOC_REG_MASK (0x80) ++#define LED_MODE_LOC_ON_VALUE (0) ++#define LED_MODE_LOC_OFF_VALUE (0x80) ++ ++enum led_type { ++ LED_TYPE_DIAG, ++ LED_TYPE_LOC, ++ LED_TYPE_FAN, ++ LED_TYPE_PSU1, ++ LED_TYPE_PSU2 ++}; ++ ++struct led_reg { ++ u32 types; ++ u8 reg_addr; ++}; ++ ++static const struct led_reg led_reg_map[] = { ++ {(1<update_lock); ++ ++ if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) ++ || !ledctl->valid) { ++ int i; ++ ++ dev_dbg(&ledctl->pdev->dev, "Starting accton_as7716_32x_led update\n"); ++ ++ /* Update LED data ++ */ ++ for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { ++ int status = accton_as7716_32x_led_read_value(led_reg_map[i].reg_addr); ++ ++ if (status < 0) { ++ ledctl->valid = 0; ++ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg_map[i].reg_addr, status); ++ goto exit; ++ } ++ else ++ { ++ ledctl->reg_val[i] = status; ++ } ++ } ++ ++ ledctl->last_updated = jiffies; ++ ledctl->valid = 1; ++ } ++ ++exit: ++ mutex_unlock(&ledctl->update_lock); ++} ++ ++static void accton_as7716_32x_led_set(struct led_classdev *led_cdev, ++ enum led_brightness led_light_mode, ++ enum led_type type) ++{ ++ int reg_val; ++ u8 reg ; ++ mutex_lock(&ledctl->update_lock); ++ ++ if( !accton_getLedReg(type, ®)) ++ { ++ dev_dbg(&ledctl->pdev->dev, "Not match item for %d.\n", type); ++ } ++ ++ reg_val = accton_as7716_32x_led_read_value(reg); ++ ++ if (reg_val < 0) { ++ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); ++ goto exit; ++ } ++ reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); ++ accton_as7716_32x_led_write_value(reg, reg_val); ++ ++ /* to prevent the slow-update issue */ ++ ledctl->valid = 0; ++ ++exit: ++ mutex_unlock(&ledctl->update_lock); ++} ++ ++ ++static void accton_as7716_32x_led_diag_set(struct led_classdev *led_cdev, ++ enum led_brightness led_light_mode) ++{ ++ accton_as7716_32x_led_set(led_cdev, led_light_mode, LED_TYPE_DIAG); ++} ++ ++static enum led_brightness accton_as7716_32x_led_diag_get(struct led_classdev *cdev) ++{ ++ accton_as7716_32x_led_update(); ++ return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); ++} ++ ++static void accton_as7716_32x_led_loc_set(struct led_classdev *led_cdev, ++ enum led_brightness led_light_mode) ++{ ++ 
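++	/*
++	 * The locator LED shares a single CPLD register with the diag LED;
++	 * LED_TYPE_LOC selects its 0x80 bit through the common setter below.
++	 */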
accton_as7716_32x_led_set(led_cdev, led_light_mode, LED_TYPE_LOC);
++}
++
++static enum led_brightness accton_as7716_32x_led_loc_get(struct led_classdev *cdev)
++{
++	accton_as7716_32x_led_update();
++	return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]);
++}
++
++static void accton_as7716_32x_led_auto_set(struct led_classdev *led_cdev,
++	enum led_brightness led_light_mode)
++{
++}
++
++static enum led_brightness accton_as7716_32x_led_auto_get(struct led_classdev *cdev)
++{
++	return LED_MODE_AUTO;
++}
++
++static struct led_classdev accton_as7716_32x_leds[] = {
++	[LED_TYPE_DIAG] = {
++		.name = "accton_as7716_32x_led::diag",
++		.default_trigger = "unused",
++		.brightness_set = accton_as7716_32x_led_diag_set,
++		.brightness_get = accton_as7716_32x_led_diag_get,
++		.flags = LED_CORE_SUSPENDRESUME,
++		.max_brightness = LED_MODE_RED,
++	},
++	[LED_TYPE_LOC] = {
++		.name = "accton_as7716_32x_led::loc",
++		.default_trigger = "unused",
++		.brightness_set = accton_as7716_32x_led_loc_set,
++		.brightness_get = accton_as7716_32x_led_loc_get,
++		.flags = LED_CORE_SUSPENDRESUME,
++		.max_brightness = LED_MODE_BLUE,
++	},
++	[LED_TYPE_FAN] = {
++		.name = "accton_as7716_32x_led::fan",
++		.default_trigger = "unused",
++		.brightness_set = accton_as7716_32x_led_auto_set,
++		.brightness_get = accton_as7716_32x_led_auto_get,
++		.flags = LED_CORE_SUSPENDRESUME,
++		.max_brightness = LED_MODE_AUTO,
++	},
++	[LED_TYPE_PSU1] = {
++		.name = "accton_as7716_32x_led::psu1",
++		.default_trigger = "unused",
++		.brightness_set = accton_as7716_32x_led_auto_set,
++		.brightness_get = accton_as7716_32x_led_auto_get,
++		.flags = LED_CORE_SUSPENDRESUME,
++		.max_brightness = LED_MODE_AUTO,
++	},
++	[LED_TYPE_PSU2] = {
++		.name = "accton_as7716_32x_led::psu2",
++		.default_trigger = "unused",
++		.brightness_set = accton_as7716_32x_led_auto_set,
++		.brightness_get = accton_as7716_32x_led_auto_get,
++		.flags = LED_CORE_SUSPENDRESUME,
++		.max_brightness = LED_MODE_AUTO,
++	},
++};
++
++static int accton_as7716_32x_led_suspend(struct platform_device *dev,
++	pm_message_t state)
++{
++	int i = 0;
++
++	for (i = 0; i < ARRAY_SIZE(accton_as7716_32x_leds); i++) {
++		led_classdev_suspend(&accton_as7716_32x_leds[i]);
++	}
++
++	return 0;
++}
++
++static int accton_as7716_32x_led_resume(struct platform_device *dev)
++{
++	int i = 0;
++
++	for (i = 0; i < ARRAY_SIZE(accton_as7716_32x_leds); i++) {
++		led_classdev_resume(&accton_as7716_32x_leds[i]);
++	}
++
++	return 0;
++}
++
++static int accton_as7716_32x_led_probe(struct platform_device *pdev)
++{
++	int ret, i;
++
++	for (i = 0; i < ARRAY_SIZE(accton_as7716_32x_leds); i++) {
++		ret = led_classdev_register(&pdev->dev, &accton_as7716_32x_leds[i]);
++
++		if (ret < 0)
++			break;
++	}
++
++	/* Check if all LEDs were successfully registered */
++	if (i != ARRAY_SIZE(accton_as7716_32x_leds)){
++		int j;
++
++		/* only unregister the LEDs that were successfully registered */
++		for (j = 0; j < i; j++) {
++			led_classdev_unregister(&accton_as7716_32x_leds[j]);
++		}
++	}
++
++	return ret;
++}
++
++static int accton_as7716_32x_led_remove(struct platform_device *pdev)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(accton_as7716_32x_leds); i++) {
++		led_classdev_unregister(&accton_as7716_32x_leds[i]);
++	}
++
++	return 0;
++}
++
++static struct platform_driver accton_as7716_32x_led_driver = {
++	.probe	 = accton_as7716_32x_led_probe,
++	.remove	 = accton_as7716_32x_led_remove,
++	.suspend = accton_as7716_32x_led_suspend,
++	.resume	 = accton_as7716_32x_led_resume,
++	.driver	 = {
++		.name =
DRVNAME, ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static int __init accton_as7716_32x_led_init(void) ++{ ++ int ret; ++ ++ extern int platform_accton_as7716_32x(void); ++ if (!platform_accton_as7716_32x()) { ++ return -ENODEV; ++ } ++ ++ ret = platform_driver_register(&accton_as7716_32x_led_driver); ++ if (ret < 0) { ++ goto exit; ++ } ++ ++ ledctl = kzalloc(sizeof(struct accton_as7716_32x_led_data), GFP_KERNEL); ++ if (!ledctl) { ++ ret = -ENOMEM; ++ platform_driver_unregister(&accton_as7716_32x_led_driver); ++ goto exit; ++ } ++ ++ mutex_init(&ledctl->update_lock); ++ ++ ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); ++ if (IS_ERR(ledctl->pdev)) { ++ ret = PTR_ERR(ledctl->pdev); ++ platform_driver_unregister(&accton_as7716_32x_led_driver); ++ kfree(ledctl); ++ goto exit; ++ } ++ ++exit: ++ return ret; ++} ++ ++static void __exit accton_as7716_32x_led_exit(void) ++{ ++ platform_device_unregister(ledctl->pdev); ++ platform_driver_unregister(&accton_as7716_32x_led_driver); ++ kfree(ledctl); ++} ++ ++module_init(accton_as7716_32x_led_init); ++module_exit(accton_as7716_32x_led_exit); ++ ++MODULE_AUTHOR("Brandon Chuang "); ++MODULE_DESCRIPTION("accton_as7716_32x_led driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig +index c75227b..3ef5125 100644 +--- a/drivers/misc/eeprom/Kconfig ++++ b/drivers/misc/eeprom/Kconfig +@@ -135,7 +135,16 @@ config EEPROM_ACCTON_AS5812_54t_SFP + + This driver can also be built as a module. If so, the module will + be called accton_as5812_54t_sfp. +- ++ ++config EEPROM_ACCTON_AS7716_32x_SFP ++ tristate "Accton as7716 32x sfp" ++ depends on I2C && SENSORS_ACCTON_I2C_CPLD ++ help ++ If you say yes here you get support for Accton as7716 32x sfp. ++ ++ This driver can also be built as a module. If so, the module will ++ be called accton_as7716_32x_sfp. ++ + config EEPROM_93CX6 + tristate "EEPROM 93CX6 support" + help +diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile +index 152a8bc..dd47dd2 100644 +--- a/drivers/misc/eeprom/Makefile ++++ b/drivers/misc/eeprom/Makefile +@@ -13,4 +13,5 @@ obj-$(CONFIG_EEPROM_ACCTON_AS7712_32x_SFP) += accton_as7712_32x_sfp.o + obj-$(CONFIG_EEPROM_ACCTON_AS5812_54x_SFP) += accton_as5812_54x_sfp.o + obj-$(CONFIG_EEPROM_ACCTON_AS6812_32x_SFP) += accton_as6812_32x_sfp.o + obj-$(CONFIG_EEPROM_ACCTON_AS5812_54t_SFP) += accton_as5812_54t_sfp.o ++obj-$(CONFIG_EEPROM_ACCTON_AS7716_32x_SFP) += accton_as7716_32x_sfp.o + obj-$(CONFIG_EEPROM_SFF_8436) += sff_8436_eeprom.o +diff --git a/drivers/misc/eeprom/accton_as7716_32x_sfp.c b/drivers/misc/eeprom/accton_as7716_32x_sfp.c +new file mode 100644 +index 0000000..432e9b7 +--- /dev/null ++++ b/drivers/misc/eeprom/accton_as7716_32x_sfp.c +@@ -0,0 +1,356 @@ ++/* ++ * An hwmon driver for accton as7716_32x sfp ++ * ++ * Copyright (C) 2014 Accton Technology Corporation. ++ * Brandon Chuang ++ * ++ * Based on ad7414.c ++ * Copyright 2006 Stefan Roese , DENX Software Engineering ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define BIT_INDEX(i) (1UL << (i)) ++ ++ ++/* Addresses scanned ++ */ ++static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; ++ ++/* Each client has this additional data ++ */ ++struct as7716_32x_sfp_data { ++ struct device *hwmon_dev; ++ struct mutex update_lock; ++ char valid; /* !=0 if registers are valid */ ++ unsigned long last_updated; /* In jiffies */ ++ int port; /* Front port index */ ++ char eeprom[256]; /* eeprom data */ ++ u32 is_present; /* present status */ ++}; ++ ++static struct as7716_32x_sfp_data *as7716_32x_sfp_update_device(struct device *dev); ++static ssize_t show_port_number(struct device *dev, struct device_attribute *da, char *buf); ++static ssize_t show_present(struct device *dev, struct device_attribute *da,char *buf); ++static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, char *buf); ++extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); ++extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); ++ ++enum as7716_32x_sfp_sysfs_attributes { ++ SFP_PORT_NUMBER, ++ SFP_IS_PRESENT, ++ SFP_IS_PRESENT_ALL, ++ SFP_EEPROM ++}; ++ ++/* sysfs attributes for hwmon ++ */ ++static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, SFP_PORT_NUMBER); ++static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_present, NULL, SFP_IS_PRESENT); ++static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_present, NULL, SFP_IS_PRESENT_ALL); ++static SENSOR_DEVICE_ATTR(sfp_eeprom, S_IRUGO, show_eeprom, NULL, SFP_EEPROM); ++ ++static struct attribute *as7716_32x_sfp_attributes[] = { ++ &sensor_dev_attr_sfp_port_number.dev_attr.attr, ++ &sensor_dev_attr_sfp_is_present.dev_attr.attr, ++ &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, ++ &sensor_dev_attr_sfp_eeprom.dev_attr.attr, ++ NULL ++}; ++ ++static ssize_t show_port_number(struct device *dev, struct device_attribute *da, ++ char *buf) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct as7716_32x_sfp_data *data = i2c_get_clientdata(client); ++ ++ return sprintf(buf, "%d\n", data->port+1); ++} ++ ++/* Error-check the CPLD read results. */ ++#define VALIDATED_READ(_buf, _rv, _read_expr, _invert) \ ++do { \ ++ _rv = (_read_expr); \ ++ if(_rv < 0) { \ ++ return sprintf(_buf, "READ ERROR\n"); \ ++ } \ ++ if(_invert) { \ ++ _rv = ~_rv; \ ++ } \ ++ _rv &= 0xFF; \ ++} while(0) ++ ++static ssize_t show_present(struct device *dev, struct device_attribute *da, ++ char *buf) ++{ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); ++ ++ if(attr->index == SFP_IS_PRESENT_ALL) { ++ int values[4]; ++ /* ++ * Report the SFP_PRESENCE status for all ports. 
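++	 * Each byte read below is an active-low presence bitmap from the
++	 * CPLD; VALIDATED_READ() inverts it, so a 1 bit reports a present
++	 * module.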
++ */ ++ ++ /* SFP_PRESENT Ports 1-8 */ ++ VALIDATED_READ(buf, values[0], accton_i2c_cpld_read(0x60, 0x30), 1); ++ /* SFP_PRESENT Ports 9-16 */ ++ VALIDATED_READ(buf, values[1], accton_i2c_cpld_read(0x60, 0x31), 1); ++ /* SFP_PRESENT Ports 17-24 */ ++ VALIDATED_READ(buf, values[2], accton_i2c_cpld_read(0x60, 0x32), 1); ++ /* SFP_PRESENT Ports 25-32 */ ++ VALIDATED_READ(buf, values[3], accton_i2c_cpld_read(0x60, 0x33), 1); ++ ++ /* Return values 1 -> 32 in order */ ++ return sprintf(buf, "%.2x %.2x %.2x %.2x\n", ++ values[0], values[1], values[2], values[3]); ++ } ++ else { /* SFP_IS_PRESENT */ ++ struct as7716_32x_sfp_data *data = as7716_32x_sfp_update_device(dev); ++ ++ if (!data->valid) { ++ return -EIO; ++ } ++ ++ return sprintf(buf, "%d\n", data->is_present); ++ } ++} ++ ++static ssize_t show_eeprom(struct device *dev, struct device_attribute *da, ++ char *buf) ++{ ++ struct as7716_32x_sfp_data *data = as7716_32x_sfp_update_device(dev); ++ ++ if (!data->valid) { ++ return 0; ++ } ++ ++ if (!data->is_present) { ++ return 0; ++ } ++ ++ memcpy(buf, data->eeprom, sizeof(data->eeprom)); ++ ++ return sizeof(data->eeprom); ++} ++ ++static const struct attribute_group as7716_32x_sfp_group = { ++ .attrs = as7716_32x_sfp_attributes, ++}; ++ ++static int as7716_32x_sfp_probe(struct i2c_client *client, ++ const struct i2c_device_id *dev_id) ++{ ++ struct as7716_32x_sfp_data *data; ++ int status; ++ ++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { ++ status = -EIO; ++ goto exit; ++ } ++ ++ data = kzalloc(sizeof(struct as7716_32x_sfp_data), GFP_KERNEL); ++ if (!data) { ++ status = -ENOMEM; ++ goto exit; ++ } ++ ++ mutex_init(&data->update_lock); ++ data->port = dev_id->driver_data; ++ i2c_set_clientdata(client, data); ++ ++ dev_info(&client->dev, "chip found\n"); ++ ++ /* Register sysfs hooks */ ++ status = sysfs_create_group(&client->dev.kobj, &as7716_32x_sfp_group); ++ if (status) { ++ goto exit_free; ++ } ++ ++ data->hwmon_dev = hwmon_device_register(&client->dev); ++ if (IS_ERR(data->hwmon_dev)) { ++ status = PTR_ERR(data->hwmon_dev); ++ goto exit_remove; ++ } ++ ++ dev_info(&client->dev, "%s: sfp '%s'\n", ++ dev_name(data->hwmon_dev), client->name); ++ ++ return 0; ++ ++exit_remove: ++ sysfs_remove_group(&client->dev.kobj, &as7716_32x_sfp_group); ++exit_free: ++ kfree(data); ++exit: ++ ++ return status; ++} ++ ++static int as7716_32x_sfp_remove(struct i2c_client *client) ++{ ++ struct as7716_32x_sfp_data *data = i2c_get_clientdata(client); ++ ++ hwmon_device_unregister(data->hwmon_dev); ++ sysfs_remove_group(&client->dev.kobj, &as7716_32x_sfp_group); ++ kfree(data); ++ ++ return 0; ++} ++ ++enum port_numbers { ++as7716_32x_sfp1, as7716_32x_sfp2, as7716_32x_sfp3, as7716_32x_sfp4, ++as7716_32x_sfp5, as7716_32x_sfp6, as7716_32x_sfp7, as7716_32x_sfp8, ++as7716_32x_sfp9, as7716_32x_sfp10,as7716_32x_sfp11,as7716_32x_sfp12, ++as7716_32x_sfp13,as7716_32x_sfp14,as7716_32x_sfp15,as7716_32x_sfp16, ++as7716_32x_sfp17,as7716_32x_sfp18,as7716_32x_sfp19,as7716_32x_sfp20, ++as7716_32x_sfp21,as7716_32x_sfp22,as7716_32x_sfp23,as7716_32x_sfp24, ++as7716_32x_sfp25,as7716_32x_sfp26,as7716_32x_sfp27,as7716_32x_sfp28, ++as7716_32x_sfp29,as7716_32x_sfp30,as7716_32x_sfp31,as7716_32x_sfp32 ++}; ++ ++static const struct i2c_device_id as7716_32x_sfp_id[] = { ++{ "as7716_32x_sfp1", as7716_32x_sfp1 }, { "as7716_32x_sfp2", as7716_32x_sfp2 }, ++{ "as7716_32x_sfp3", as7716_32x_sfp3 }, { "as7716_32x_sfp4", as7716_32x_sfp4 }, ++{ "as7716_32x_sfp5", as7716_32x_sfp5 }, { "as7716_32x_sfp6", 
as7716_32x_sfp6 }, ++{ "as7716_32x_sfp7", as7716_32x_sfp7 }, { "as7716_32x_sfp8", as7716_32x_sfp8 }, ++{ "as7716_32x_sfp9", as7716_32x_sfp9 }, { "as7716_32x_sfp10", as7716_32x_sfp10 }, ++{ "as7716_32x_sfp11", as7716_32x_sfp11 }, { "as7716_32x_sfp12", as7716_32x_sfp12 }, ++{ "as7716_32x_sfp13", as7716_32x_sfp13 }, { "as7716_32x_sfp14", as7716_32x_sfp14 }, ++{ "as7716_32x_sfp15", as7716_32x_sfp15 }, { "as7716_32x_sfp16", as7716_32x_sfp16 }, ++{ "as7716_32x_sfp17", as7716_32x_sfp17 }, { "as7716_32x_sfp18", as7716_32x_sfp18 }, ++{ "as7716_32x_sfp19", as7716_32x_sfp19 }, { "as7716_32x_sfp20", as7716_32x_sfp20 }, ++{ "as7716_32x_sfp21", as7716_32x_sfp21 }, { "as7716_32x_sfp22", as7716_32x_sfp22 }, ++{ "as7716_32x_sfp23", as7716_32x_sfp23 }, { "as7716_32x_sfp24", as7716_32x_sfp24 }, ++{ "as7716_32x_sfp25", as7716_32x_sfp25 }, { "as7716_32x_sfp26", as7716_32x_sfp26 }, ++{ "as7716_32x_sfp27", as7716_32x_sfp27 }, { "as7716_32x_sfp28", as7716_32x_sfp28 }, ++{ "as7716_32x_sfp29", as7716_32x_sfp29 }, { "as7716_32x_sfp30", as7716_32x_sfp30 }, ++{ "as7716_32x_sfp31", as7716_32x_sfp31 }, { "as7716_32x_sfp32", as7716_32x_sfp32 }, ++{} ++}; ++MODULE_DEVICE_TABLE(i2c, as7716_32x_sfp_id); ++ ++static struct i2c_driver as7716_32x_sfp_driver = { ++ .class = I2C_CLASS_HWMON, ++ .driver = { ++ .name = "as7716_32x_sfp", ++ }, ++ .probe = as7716_32x_sfp_probe, ++ .remove = as7716_32x_sfp_remove, ++ .id_table = as7716_32x_sfp_id, ++ .address_list = normal_i2c, ++}; ++ ++static int as7716_32x_sfp_read_block(struct i2c_client *client, u8 command, u8 *data, ++ int data_len) ++{ ++ int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); ++ ++ if (unlikely(result < 0)) ++ goto abort; ++ if (unlikely(result != data_len)) { ++ result = -EIO; ++ goto abort; ++ } ++ ++ result = 0; ++ ++abort: ++ return result; ++} ++ ++static struct as7716_32x_sfp_data *as7716_32x_sfp_update_device(struct device *dev) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct as7716_32x_sfp_data *data = i2c_get_clientdata(client); ++ ++ mutex_lock(&data->update_lock); ++ ++ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) ++ || !data->valid) { ++ int status = -1; ++ int i = 0; ++ u8 cpld_reg = 0x30 + (data->port/8); ++ ++ data->valid = 0; ++ ++ /* Read present status of the specified port number */ ++ data->is_present = 0; ++ status = accton_i2c_cpld_read(0x60, cpld_reg); ++ ++ if (status < 0) { ++ dev_dbg(&client->dev, "cpld(0x60) reg(0x%x) err %d\n", cpld_reg, status); ++ goto exit; ++ } ++ ++ data->is_present = (status & (1 << (data->port % 8))) ? 
0 : 1; ++ ++ /* Read eeprom data based on port number */ ++ memset(data->eeprom, 0, sizeof(data->eeprom)); ++ ++ /* Check if the port is present */ ++ if (data->is_present) { ++ /* read eeprom */ ++ for (i = 0; i < sizeof(data->eeprom)/I2C_SMBUS_BLOCK_MAX; i++) { ++ status = as7716_32x_sfp_read_block(client, i*I2C_SMBUS_BLOCK_MAX, ++ data->eeprom+(i*I2C_SMBUS_BLOCK_MAX), ++ I2C_SMBUS_BLOCK_MAX); ++ if (status < 0) { ++ dev_dbg(&client->dev, "unable to read eeprom from port(%d)\n", data->port); ++ goto exit; ++ } ++ } ++ } ++ ++ data->last_updated = jiffies; ++ data->valid = 1; ++ } ++ ++exit: ++ mutex_unlock(&data->update_lock); ++ ++ return data; ++} ++ ++static int __init as7716_32x_sfp_init(void) ++{ ++ extern int platform_accton_as7716_32x(void); ++ if (!platform_accton_as7716_32x()) { ++ return -ENODEV; ++ } ++ ++ return i2c_add_driver(&as7716_32x_sfp_driver); ++} ++ ++static void __exit as7716_32x_sfp_exit(void) ++{ ++ i2c_del_driver(&as7716_32x_sfp_driver); ++} ++ ++MODULE_AUTHOR("Brandon Chuang "); ++MODULE_DESCRIPTION("accton as7716_32x_sfp driver"); ++MODULE_LICENSE("GPL"); ++ ++module_init(as7716_32x_sfp_init); ++module_exit(as7716_32x_sfp_exit); + diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series index f958b6b7..3b224736 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series @@ -246,3 +246,6 @@ platform-accton-as6812_32x-device-drivers.patch platform-accton-as5812_54t-device-drivers.patch driver-mfd-lpc-ich.patch driver-watchdog-itco-wd.patch +platform-accton-as7716_32x-device-drivers.patch +driver-broadcom-tigon3.patch +mgmt-port-init-config.patch From da17a8a6d08ff7f943787ef0a12f3a87fba256e6 Mon Sep 17 00:00:00 2001 From: Lewis Kang Date: Wed, 20 Apr 2016 12:11:36 +0800 Subject: [PATCH 2/6] add as7716-32x needed kernel config (port from ONL1.0) --- .../3.2.65-1+deb7u2/configs/x86_64-all/x86_64-all.config | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/3.2.65-1+deb7u2/configs/x86_64-all/x86_64-all.config index 49879a00..129d65a1 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/configs/x86_64-all/x86_64-all.config +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/configs/x86_64-all/x86_64-all.config @@ -1054,6 +1054,7 @@ CONFIG_EEPROM_ACCTON_AS7712_32x_SFP=y CONFIG_EEPROM_ACCTON_AS5812_54x_SFP=y CONFIG_EEPROM_ACCTON_AS6812_32x_SFP=y CONFIG_EEPROM_ACCTON_AS5812_54t_SFP=y +CONFIG_EEPROM_ACCTON_AS7716_32x_SFP=y CONFIG_EEPROM_93CX6=y # CONFIG_EEPROM_93XX46 is not set CONFIG_EEPROM_SFF_8436=y @@ -1927,6 +1928,8 @@ CONFIG_SENSORS_ACCTON_AS6812_32x_FAN=y CONFIG_SENSORS_ACCTON_AS6812_32x_PSU=y CONFIG_SENSORS_ACCTON_AS5812_54t_FAN=y CONFIG_SENSORS_ACCTON_AS5812_54t_PSU=y +CONFIG_SENSORS_ACCTON_AS7716_32x_FAN=y +CONFIG_SENSORS_ACCTON_AS7716_32x_PSU=y # # ACPI drivers @@ -2306,6 +2309,7 @@ CONFIG_LEDS_ACCTON_AS7712_32x=y CONFIG_LEDS_ACCTON_AS5812_54x=y CONFIG_LEDS_ACCTON_AS6812_32x=y CONFIG_LEDS_ACCTON_AS5812_54t=y +CONFIG_LEDS_ACCTON_AS7716_32x=y # CONFIG_LEDS_LM3530 is not set # CONFIG_LEDS_PCA9532 is not set # CONFIG_LEDS_GPIO is not set From bba31e1006016884a2cc1110f647fbc1433a3e66 Mon Sep 17 00:00:00 2001 From: Lewis Kang Date: Mon, 25 Apr 2016 13:43:12 +0800 Subject: [PATCH 3/6] 1. use BM instead of SM for platform matching. 2. resolve conflicts because of as5512 merging in upstream. 1. 
Use the Baseboard Manufacturer (BM) field, together with the Product
   Name, instead of the System Manufacturer (SM) field for platform
   matching.
2. The as5512 support merged upstream conflicts with this pull request,
   so update the conflicted files.
---
 .../configs/x86_64-all/x86_64-all.config      |    4 +
 ...orm-accton-as5512_54x-device-drivers.patch | 2608 +++++++++++++++++
 ...orm-accton-as5712_54x-device-drivers.patch |    2 +-
 ...orm-accton-as5812_54t-device-drivers.patch |    2 +-
 ...orm-accton-as5812_54x-device-drivers.patch |    2 +-
 ...orm-accton-as6712_32x-device-drivers.patch |    2 +-
 ...orm-accton-as6812_32x-device-drivers.patch |    2 +-
 ...orm-accton-as7512_32x-device-drivers.patch |    2 +-
 ...orm-accton-as7712_32x-device-drivers.patch |    2 +-
 ...orm-accton-as7716_32x-device-drivers.patch |   95 +-
 .../kernels/3.2.65-1+deb7u2/patches/series    |    1 +
 11 files changed, 2664 insertions(+), 58 deletions(-)
 create mode 100644 packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5512_54x-device-drivers.patch

diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/3.2.65-1+deb7u2/configs/x86_64-all/x86_64-all.config
index 129d65a1..90101507 100644
--- a/packages/base/any/kernels/3.2.65-1+deb7u2/configs/x86_64-all/x86_64-all.config
+++ b/packages/base/any/kernels/3.2.65-1+deb7u2/configs/x86_64-all/x86_64-all.config
@@ -1047,6 +1047,7 @@ CONFIG_EEPROM_AT24=y
 CONFIG_EEPROM_AT25=y
 # CONFIG_EEPROM_LEGACY is not set
 # CONFIG_EEPROM_MAX6875 is not set
+CONFIG_EEPROM_ACCTON_AS5512_54X_SFP=y
 CONFIG_EEPROM_ACCTON_AS5712_54x_SFP=y
 CONFIG_EEPROM_ACCTON_AS6712_32x_SFP=y
 CONFIG_EEPROM_ACCTON_AS7512_32x_SFP=y
@@ -1912,6 +1913,8 @@ CONFIG_SENSORS_W83781D=y
 # CONFIG_SENSORS_APPLESMC is not set
 # CONFIG_SENSORS_QUANTA_LY_HWMON is not set
 CONFIG_SENSORS_CPR_4011_4MXX=y
+CONFIG_SENSORS_ACCTON_AS5512_54X_PSU=y
+CONFIG_SENSORS_ACCTON_AS5512_54X_FAN=y
 CONFIG_SENSORS_ACCTON_AS5712_54x_FAN=y
@@ -2302,6 +2305,7 @@ CONFIG_LEDS_CLASS=y
 #
 # LED drivers
 #
+CONFIG_LEDS_ACCTON_AS5512_54X=y
 CONFIG_LEDS_ACCTON_AS5712_54x=y
diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5512_54x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5512_54x-device-drivers.patch
new file mode 100644
index 00000000..da1cad21
--- /dev/null
+++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5512_54x-device-drivers.patch
@@ -0,0 +1,2608 @@
+Device driver patches for accton as5512 (fan/psu/cpld/led/sfp)
+
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 89c619d..968bd5f 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -1574,6 +1574,24 @@ config SENSORS_ACCTON_AS5812_54t_PSU
+ 	  This driver can also be built as a module. If so, the module will
+ 	  be called accton_as5812_54t_psu.
+
++config SENSORS_ACCTON_AS5512_54X_PSU
++	tristate "Accton as5512 54x psu"
++	depends on I2C && SENSORS_ACCTON_I2C_CPLD
++	help
++	  If you say yes here you get support for Accton as5512 54x psu.
++
++	  This driver can also be built as a module. If so, the module will
++	  be called accton_as5512_54x_psu.
++
++config SENSORS_ACCTON_AS5512_54X_FAN
++	tristate "Accton as5512 54x fan"
++	depends on I2C && SENSORS_ACCTON_I2C_CPLD
++	help
++	  If you say yes here you get support for Accton as5512 54x fan.
++
++	  This driver can also be built as a module.
If so, the module will ++ be called accton_as5512_54x_fan. ++ + if ACPI + + comment "ACPI drivers" +diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile +index de922bc..b8ee7b0 100644 +--- a/drivers/hwmon/Makefile ++++ b/drivers/hwmon/Makefile +@@ -36,6 +36,8 @@ obj-$(CONFIG_SENSORS_ACCTON_AS6812_32x_FAN) += accton_as6812_32x_fan.o + obj-$(CONFIG_SENSORS_ACCTON_AS6812_32x_PSU) += accton_as6812_32x_psu.o + obj-$(CONFIG_SENSORS_ACCTON_AS5812_54t_FAN) += accton_as5812_54t_fan.o + obj-$(CONFIG_SENSORS_ACCTON_AS5812_54t_PSU) += accton_as5812_54t_psu.o ++obj-$(CONFIG_SENSORS_ACCTON_AS5512_54X_PSU) += accton_as5512_54x_psu.o ++obj-$(CONFIG_SENSORS_ACCTON_AS5512_54X_FAN) += accton_as5512_54x_fan.o + obj-$(CONFIG_SENSORS_AD7314) += ad7314.o + obj-$(CONFIG_SENSORS_AD7414) += ad7414.o + obj-$(CONFIG_SENSORS_AD7418) += ad7418.o +diff --git a/drivers/hwmon/accton_as5512_54x_fan.c b/drivers/hwmon/accton_as5512_54x_fan.c +new file mode 100644 +index 0000000..67e3dd6 +--- /dev/null ++++ b/drivers/hwmon/accton_as5512_54x_fan.c +@@ -0,0 +1,454 @@ ++/* ++ * A hwmon driver for the Accton as5512 54x fan control ++ * ++ * Copyright (C) 2015 Accton Technology Corporation. ++ * Brandon Chuang ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define FAN_MAX_NUMBER 5 ++#define FAN_SPEED_CPLD_TO_RPM_STEP 150 ++#define FAN_SPEED_PRECENT_TO_CPLD_STEP 5 ++#define FAN_DUTY_CYCLE_MIN 0 ++#define FAN_DUTY_CYCLE_MAX 100 /* 100% */ ++ ++#define CPLD_REG_FAN_STATUS_OFFSET 0x0C ++#define CPLD_REG_FANR_STATUS_OFFSET 0x1E ++#define CPLD_REG_FAN_DIRECTION_OFFSET 0x1D ++ ++#define CPLD_FAN1_REG_SPEED_OFFSET 0x10 ++#define CPLD_FAN2_REG_SPEED_OFFSET 0x11 ++#define CPLD_FAN3_REG_SPEED_OFFSET 0x12 ++#define CPLD_FAN4_REG_SPEED_OFFSET 0x13 ++#define CPLD_FAN5_REG_SPEED_OFFSET 0x14 ++ ++#define CPLD_FANR1_REG_SPEED_OFFSET 0x18 ++#define CPLD_FANR2_REG_SPEED_OFFSET 0x19 ++#define CPLD_FANR3_REG_SPEED_OFFSET 0x1A ++#define CPLD_FANR4_REG_SPEED_OFFSET 0x1B ++#define CPLD_FANR5_REG_SPEED_OFFSET 0x1C ++ ++#define CPLD_REG_FAN_PWM_CYCLE_OFFSET 0x0D ++ ++#define CPLD_FAN1_INFO_BIT_MASK 0x01 ++#define CPLD_FAN2_INFO_BIT_MASK 0x02 ++#define CPLD_FAN3_INFO_BIT_MASK 0x04 ++#define CPLD_FAN4_INFO_BIT_MASK 0x08 ++#define CPLD_FAN5_INFO_BIT_MASK 0x10 ++ ++#define PROJECT_NAME ++ ++#define LOCAL_DEBUG 0 ++ ++static struct accton_as5512_54x_fan *fan_data = NULL; ++ ++struct accton_as5512_54x_fan { ++ struct platform_device *pdev; ++ struct device *hwmon_dev; ++ struct mutex update_lock; ++ char valid; /* != 0 if registers are valid */ ++ unsigned long last_updated; /* In jiffies */ ++ u8 status[FAN_MAX_NUMBER]; /* inner first fan status */ ++ u32 speed[FAN_MAX_NUMBER]; /* inner first fan speed */ ++ u8 direction[FAN_MAX_NUMBER]; /* reconrd the direction of inner first and second fans */ ++ u32 duty_cycle[FAN_MAX_NUMBER]; /* control the speed of inner first and second fans */ ++ u8 r_status[FAN_MAX_NUMBER]; /* inner second fan status */ ++ u32 r_speed[FAN_MAX_NUMBER]; /* inner second fan speed */ ++}; ++ ++/*******************/ ++#define MAKE_FAN_MASK_OR_REG(name,type) \ ++ CPLD_FAN##type##1_##name, \ ++ CPLD_FAN##type##2_##name, \ ++ CPLD_FAN##type##3_##name, \ ++ CPLD_FAN##type##4_##name, \ ++ CPLD_FAN##type##5_##name, ++ ++/* fan related data ++ */ ++static const u8 fan_info_mask[] = { ++ MAKE_FAN_MASK_OR_REG(INFO_BIT_MASK,) ++}; ++ ++static const u8 fan_speed_reg[] = { ++ MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,) ++}; ++ ++static const u8 fanr_speed_reg[] = { ++ MAKE_FAN_MASK_OR_REG(REG_SPEED_OFFSET,R) ++}; ++ ++/*******************/ ++#define DEF_FAN_SET(id) \ ++ FAN##id##_FAULT, \ ++ FAN##id##_SPEED, \ ++ FAN##id##_DUTY_CYCLE, \ ++ FAN##id##_DIRECTION, \ ++ FANR##id##_FAULT, \ ++ FANR##id##_SPEED, ++ ++enum sysfs_fan_attributes { ++ DEF_FAN_SET(1) ++ DEF_FAN_SET(2) ++ DEF_FAN_SET(3) ++ DEF_FAN_SET(4) ++ DEF_FAN_SET(5) ++}; ++/*******************/ ++static void accton_as5512_54x_fan_update_device(struct device *dev); ++static int accton_as5512_54x_fan_read_value(u8 reg); ++static int accton_as5512_54x_fan_write_value(u8 reg, u8 value); ++ ++static ssize_t fan_set_duty_cycle(struct device *dev, ++ struct device_attribute *da,const char *buf, size_t count); ++static ssize_t fan_show_value(struct device *dev, ++ struct device_attribute *da, char *buf); ++ ++extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); ++extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); ++ ++ ++/*******************/ ++#define _MAKE_SENSOR_DEVICE_ATTR(prj, id) \ ++ static SENSOR_DEVICE_ATTR(prj##fan##id##_fault, S_IRUGO, fan_show_value, NULL, FAN##id##_FAULT); \ ++ static 
SENSOR_DEVICE_ATTR(prj##fan##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FAN##id##_SPEED); \ ++ static SENSOR_DEVICE_ATTR(prj##fan##id##_direction, S_IRUGO, fan_show_value, NULL, FAN##id##_DIRECTION); \ ++ static SENSOR_DEVICE_ATTR(prj##fanr##id##_fault, S_IRUGO, fan_show_value, NULL, FANR##id##_FAULT); \ ++ static SENSOR_DEVICE_ATTR(prj##fanr##id##_speed_rpm, S_IRUGO, fan_show_value, NULL, FANR##id##_SPEED); ++ ++#define MAKE_SENSOR_DEVICE_ATTR(prj,id) _MAKE_SENSOR_DEVICE_ATTR(prj,id) ++ ++#define _MAKE_SENSOR_DEVICE_ATTR_FAN_DUTY(prj,id) \ ++ static SENSOR_DEVICE_ATTR(prj##fan##id##_duty_cycle_percentage, S_IWUSR | S_IRUGO, fan_show_value, \ ++ fan_set_duty_cycle, FAN1_DUTY_CYCLE); ++ ++#define MAKE_SENSOR_DEVICE_ATTR_FAN_DUTY(prj,id) _MAKE_SENSOR_DEVICE_ATTR_FAN_DUTY(prj,id) ++ ++ ++MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 1) ++MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 2) ++MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 3) ++MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 4) ++MAKE_SENSOR_DEVICE_ATTR(PROJECT_NAME, 5) ++MAKE_SENSOR_DEVICE_ATTR_FAN_DUTY(PROJECT_NAME,) ++/*******************/ ++ ++#define _MAKE_FAN_ATTR(prj, id) \ ++ &sensor_dev_attr_##prj##fan##id##_fault.dev_attr.attr, \ ++ &sensor_dev_attr_##prj##fan##id##_speed_rpm.dev_attr.attr, \ ++ &sensor_dev_attr_##prj##fan##id##_direction.dev_attr.attr, \ ++ &sensor_dev_attr_##prj##fanr##id##_fault.dev_attr.attr, \ ++ &sensor_dev_attr_##prj##fanr##id##_speed_rpm.dev_attr.attr, ++ ++#define MAKE_FAN_ATTR(prj, id) _MAKE_FAN_ATTR(prj, id) ++ ++#define _MAKE_FAN_DUTY_ATTR(prj, id) \ ++ &sensor_dev_attr_##prj##fan##id##_duty_cycle_percentage.dev_attr.attr, ++ ++#define MAKE_FAN_DUTY_ATTR(prj, id) _MAKE_FAN_DUTY_ATTR(prj, id) ++ ++static struct attribute *accton_as5512_54x_fan_attributes[] = { ++ /* fan related attributes */ ++ MAKE_FAN_ATTR(PROJECT_NAME,1) ++ MAKE_FAN_ATTR(PROJECT_NAME,2) ++ MAKE_FAN_ATTR(PROJECT_NAME,3) ++ MAKE_FAN_ATTR(PROJECT_NAME,4) ++ MAKE_FAN_ATTR(PROJECT_NAME,5) ++ MAKE_FAN_DUTY_ATTR(PROJECT_NAME,) ++ NULL ++}; ++/*******************/ ++ ++/* fan related functions ++ */ ++static ssize_t fan_show_value(struct device *dev, struct device_attribute *da, ++ char *buf) ++{ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); ++ ssize_t ret = 0; ++ int data_index, type_index; ++ ++ accton_as5512_54x_fan_update_device(dev); ++ ++ if (fan_data->valid == 0) { ++ return ret; ++ } ++ ++ type_index = attr->index%FAN2_FAULT; ++ data_index = attr->index/FAN2_FAULT; ++ ++ switch (type_index) { ++ case FAN1_FAULT: ++ ret = sprintf(buf, "%d\n", fan_data->status[data_index]); ++ if (LOCAL_DEBUG) ++ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); ++ break; ++ case FAN1_SPEED: ++ ret = sprintf(buf, "%d\n", fan_data->speed[data_index]); ++ if (LOCAL_DEBUG) ++ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); ++ break; ++ case FAN1_DUTY_CYCLE: ++ ret = sprintf(buf, "%d\n", fan_data->duty_cycle[data_index]); ++ if (LOCAL_DEBUG) ++ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); ++ break; ++ case FAN1_DIRECTION: ++ ret = sprintf(buf, "%d\n", fan_data->direction[data_index]); /* presnet, need to modify*/ ++ if (LOCAL_DEBUG) ++ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); ++ break; ++ case FANR1_FAULT: ++ ret = sprintf(buf, "%d\n", fan_data->r_status[data_index]); ++ if (LOCAL_DEBUG) ++ 
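++			/* debug trace only; LOCAL_DEBUG is a compile-time constant (0) */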
printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); ++ break; ++ case FANR1_SPEED: ++ ret = sprintf(buf, "%d\n", fan_data->r_speed[data_index]); ++ if (LOCAL_DEBUG) ++ printk ("[Check !!][%s][%d][type->index=%d][data->index=%d]\n", __FUNCTION__, __LINE__, type_index, data_index); ++ break; ++ default: ++ if (LOCAL_DEBUG) ++ printk ("[Check !!][%s][%d] \n", __FUNCTION__, __LINE__); ++ break; ++ } ++ ++ return ret; ++} ++/*******************/ ++static ssize_t fan_set_duty_cycle(struct device *dev, struct device_attribute *da, ++ const char *buf, size_t count) { ++ ++ int error, value; ++ ++ error = kstrtoint(buf, 10, &value); ++ if (error) ++ return error; ++ ++ if (value < FAN_DUTY_CYCLE_MIN || value > FAN_DUTY_CYCLE_MAX) ++ return -EINVAL; ++ ++ accton_as5512_54x_fan_write_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET, value/FAN_SPEED_PRECENT_TO_CPLD_STEP); ++ ++ fan_data->valid = 0; ++ ++ return count; ++} ++ ++static const struct attribute_group accton_as5512_54x_fan_group = { ++ .attrs = accton_as5512_54x_fan_attributes, ++}; ++ ++static int accton_as5512_54x_fan_read_value(u8 reg) ++{ ++ return accton_i2c_cpld_read(0x60, reg); ++} ++ ++static int accton_as5512_54x_fan_write_value(u8 reg, u8 value) ++{ ++ return accton_i2c_cpld_write(0x60, reg, value); ++} ++ ++static void accton_as5512_54x_fan_update_device(struct device *dev) ++{ ++ int speed, r_speed, fault, r_fault, ctrl_speed, direction; ++ int i; ++ ++ mutex_lock(&fan_data->update_lock); ++ ++ if (LOCAL_DEBUG) ++ printk ("Starting accton_as5512_54x_fan update \n"); ++ ++ if (!(time_after(jiffies, fan_data->last_updated + HZ + HZ / 2) || !fan_data->valid)) { ++ /* do nothing */ ++ goto _exit; ++ } ++ ++ fan_data->valid = 0; ++ ++ if (LOCAL_DEBUG) ++ printk ("Starting accton_as5512_54x_fan update 2 \n"); ++ ++ fault = accton_as5512_54x_fan_read_value(CPLD_REG_FAN_STATUS_OFFSET); ++ r_fault = accton_as5512_54x_fan_read_value(CPLD_REG_FANR_STATUS_OFFSET); ++ direction = accton_as5512_54x_fan_read_value(CPLD_REG_FAN_DIRECTION_OFFSET); ++ ctrl_speed = accton_as5512_54x_fan_read_value(CPLD_REG_FAN_PWM_CYCLE_OFFSET); ++ ++ if ( (fault < 0) || (r_fault < 0) || (direction < 0) || (ctrl_speed < 0) ) ++ { ++ if (LOCAL_DEBUG) ++ printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); ++ goto _exit; /* error */ ++ } ++ ++ if (LOCAL_DEBUG) ++ printk ("[fan:] fault:%d, r_fault=%d, direction=%d, ctrl_speed=%d \n",fault, r_fault, direction, ctrl_speed); ++ ++ for (i=0; istatus[i] = (fault & fan_info_mask[i]) >> i; ++ if (LOCAL_DEBUG) ++ printk ("[fan%d:] fail=%d \n",i, fan_data->status[i]); ++ ++ fan_data->r_status[i] = (r_fault & fan_info_mask[i]) >> i; ++ fan_data->direction[i] = (direction & fan_info_mask[i]) >> i; ++ fan_data->duty_cycle[i] = ctrl_speed * FAN_SPEED_PRECENT_TO_CPLD_STEP; ++ ++ /* fan speed ++ */ ++ speed = accton_as5512_54x_fan_read_value(fan_speed_reg[i]); ++ r_speed = accton_as5512_54x_fan_read_value(fanr_speed_reg[i]); ++ if ( (speed < 0) || (r_speed < 0) ) ++ { ++ if (LOCAL_DEBUG) ++ printk ("[Error!!][%s][%d] \n", __FUNCTION__, __LINE__); ++ goto _exit; /* error */ ++ } ++ ++ if (LOCAL_DEBUG) ++ printk ("[fan%d:] speed:%d, r_speed=%d \n", i, speed, r_speed); ++ ++ fan_data->speed[i] = speed * FAN_SPEED_CPLD_TO_RPM_STEP; ++ fan_data->r_speed[i] = r_speed * FAN_SPEED_CPLD_TO_RPM_STEP; ++ } ++ ++ /* finish to update */ ++ fan_data->last_updated = jiffies; ++ fan_data->valid = 1; ++ ++_exit: ++ mutex_unlock(&fan_data->update_lock); ++} ++ ++static int 
accton_as5512_54x_fan_probe(struct platform_device *pdev) ++{ ++ int status = -1; ++ ++ /* Register sysfs hooks */ ++ status = sysfs_create_group(&pdev->dev.kobj, &accton_as5512_54x_fan_group); ++ if (status) { ++ goto exit; ++ ++ } ++ ++ fan_data->hwmon_dev = hwmon_device_register(&pdev->dev); ++ if (IS_ERR(fan_data->hwmon_dev)) { ++ status = PTR_ERR(fan_data->hwmon_dev); ++ goto exit_remove; ++ } ++ ++ dev_info(&pdev->dev, "accton_as5512_54x_fan\n"); ++ ++ return 0; ++ ++exit_remove: ++ sysfs_remove_group(&pdev->dev.kobj, &accton_as5512_54x_fan_group); ++exit: ++ return status; ++} ++ ++static int accton_as5512_54x_fan_remove(struct platform_device *pdev) ++{ ++ hwmon_device_unregister(fan_data->hwmon_dev); ++ sysfs_remove_group(&fan_data->pdev->dev.kobj, &accton_as5512_54x_fan_group); ++ ++ return 0; ++} ++ ++#define DRVNAME "as5512_54x_fan" ++ ++static struct platform_driver accton_as5512_54x_fan_driver = { ++ .probe = accton_as5512_54x_fan_probe, ++ .remove = accton_as5512_54x_fan_remove, ++ .driver = { ++ .name = DRVNAME, ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static int __init accton_as5512_54x_fan_init(void) ++{ ++ int ret; ++ ++ extern int platform_accton_as5512_54x(void); ++ if(!platform_accton_as5512_54x()) { ++ return -ENODEV; ++ } ++ ++ ret = platform_driver_register(&accton_as5512_54x_fan_driver); ++ if (ret < 0) { ++ goto exit; ++ } ++ ++ fan_data = kzalloc(sizeof(struct accton_as5512_54x_fan), GFP_KERNEL); ++ if (!fan_data) { ++ ret = -ENOMEM; ++ platform_driver_unregister(&accton_as5512_54x_fan_driver); ++ goto exit; ++ } ++ ++ mutex_init(&fan_data->update_lock); ++ fan_data->valid = 0; ++ ++ fan_data->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0); ++ if (IS_ERR(fan_data->pdev)) { ++ ret = PTR_ERR(fan_data->pdev); ++ platform_driver_unregister(&accton_as5512_54x_fan_driver); ++ kfree(fan_data); ++ goto exit; ++ } ++ ++exit: ++ return ret; ++} ++ ++static void __exit accton_as5512_54x_fan_exit(void) ++{ ++ platform_device_unregister(fan_data->pdev); ++ platform_driver_unregister(&accton_as5512_54x_fan_driver); ++ kfree(fan_data); ++} ++ ++MODULE_AUTHOR("Brandon Chuang "); ++MODULE_DESCRIPTION("accton_as5512_54x_fan driver"); ++MODULE_LICENSE("GPL"); ++ ++module_init(accton_as5512_54x_fan_init); ++module_exit(accton_as5512_54x_fan_exit); ++ ++ +diff --git a/drivers/hwmon/accton_as5512_54x_psu.c b/drivers/hwmon/accton_as5512_54x_psu.c +new file mode 100644 +index 0000000..66d61f3 +--- /dev/null ++++ b/drivers/hwmon/accton_as5512_54x_psu.c +@@ -0,0 +1,295 @@ ++/* ++ * An hwmon driver for accton as5512_54x Power Module ++ * ++ * Copyright (C) 2015 Accton Technology Corporation. ++ * Brandon Chuang ++ * ++ * Based on ad7414.c ++ * Copyright 2006 Stefan Roese , DENX Software Engineering ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static ssize_t show_index(struct device *dev, struct device_attribute *da, char *buf); ++static ssize_t show_status(struct device *dev, struct device_attribute *da, char *buf); ++static ssize_t show_model_name(struct device *dev, struct device_attribute *da, char *buf); ++static int as5512_54x_psu_read_block(struct i2c_client *client, u8 command, u8 *data,int data_len); ++extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); ++ ++/* Addresses scanned ++ */ ++static const unsigned short normal_i2c[] = { 0x38, 0x3b, 0x50, 0x53, I2C_CLIENT_END }; ++ ++/* Each client has this additional data ++ */ ++struct as5512_54x_psu_data { ++ struct device *hwmon_dev; ++ struct mutex update_lock; ++ char valid; /* !=0 if registers are valid */ ++ unsigned long last_updated; /* In jiffies */ ++ u8 index; /* PSU index */ ++ u8 status; /* Status(present/power_good) register read from CPLD */ ++ char model_name[14]; /* Model name, read from eeprom */ ++}; ++ ++static struct as5512_54x_psu_data *as5512_54x_psu_update_device(struct device *dev); ++ ++enum as5512_54x_psu_sysfs_attributes { ++ PSU_INDEX, ++ PSU_PRESENT, ++ PSU_MODEL_NAME, ++ PSU_POWER_GOOD ++}; ++ ++/* sysfs attributes for hwmon ++ */ ++static SENSOR_DEVICE_ATTR(psu_index, S_IRUGO, show_index, NULL, PSU_INDEX); ++static SENSOR_DEVICE_ATTR(psu_present, S_IRUGO, show_status, NULL, PSU_PRESENT); ++static SENSOR_DEVICE_ATTR(psu_model_name, S_IRUGO, show_model_name,NULL, PSU_MODEL_NAME); ++static SENSOR_DEVICE_ATTR(psu_power_good, S_IRUGO, show_status, NULL, PSU_POWER_GOOD); ++ ++static struct attribute *as5512_54x_psu_attributes[] = { ++ &sensor_dev_attr_psu_index.dev_attr.attr, ++ &sensor_dev_attr_psu_present.dev_attr.attr, ++ &sensor_dev_attr_psu_model_name.dev_attr.attr, ++ &sensor_dev_attr_psu_power_good.dev_attr.attr, ++ NULL ++}; ++ ++static ssize_t show_index(struct device *dev, struct device_attribute *da, ++ char *buf) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct as5512_54x_psu_data *data = i2c_get_clientdata(client); ++ ++ return sprintf(buf, "%d\n", data->index); ++} ++ ++static ssize_t show_status(struct device *dev, struct device_attribute *da, ++ char *buf) ++{ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); ++ struct as5512_54x_psu_data *data = as5512_54x_psu_update_device(dev); ++ u8 status = 0; ++ ++ if (attr->index == PSU_PRESENT) { ++ status = !(data->status >> ((data->index - 1) * 4) & 0x1); ++ } ++ else { /* PSU_POWER_GOOD */ ++ status = data->status >> ((data->index - 1) * 4 + 1) & 0x1; ++ } ++ ++ return sprintf(buf, "%d\n", status); ++} ++ ++static ssize_t show_model_name(struct device *dev, struct device_attribute *da, ++ char *buf) ++{ ++ struct as5512_54x_psu_data *data = as5512_54x_psu_update_device(dev); ++ ++ return sprintf(buf, "%s", data->model_name); ++} ++ ++static const struct attribute_group as5512_54x_psu_group = { ++ .attrs = as5512_54x_psu_attributes, ++}; ++ ++static int as5512_54x_psu_probe(struct i2c_client *client, ++ const struct i2c_device_id *dev_id) ++{ ++ struct as5512_54x_psu_data *data; ++ int status; ++ ++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { ++ status = -EIO; ++ goto exit; ++ } ++ ++ data = kzalloc(sizeof(struct as5512_54x_psu_data), GFP_KERNEL); ++ if (!data) { ++ status = -ENOMEM; ++ goto exit; ++ } ++ ++ i2c_set_clientdata(client, data); ++ data->valid = 0; ++ mutex_init(&data->update_lock); ++ ++ 
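++	/*
++	 * data->index (PSU 1 or 2) is assigned later in this probe from
++	 * the I2C address the client was instantiated at.
++	 */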
dev_info(&client->dev, "chip found\n"); ++ ++ /* Register sysfs hooks */ ++ status = sysfs_create_group(&client->dev.kobj, &as5512_54x_psu_group); ++ if (status) { ++ goto exit_free; ++ } ++ ++ data->hwmon_dev = hwmon_device_register(&client->dev); ++ if (IS_ERR(data->hwmon_dev)) { ++ status = PTR_ERR(data->hwmon_dev); ++ goto exit_remove; ++ } ++ ++ /* Update PSU index */ ++ if (client->addr == 0x38 || client->addr == 0x50) { ++ data->index = 1; ++ } ++ else if (client->addr == 0x3b || client->addr == 0x53) { ++ data->index = 2; ++ } ++ ++ dev_info(&client->dev, "%s: psu '%s'\n", ++ dev_name(data->hwmon_dev), client->name); ++ ++ return 0; ++ ++exit_remove: ++ sysfs_remove_group(&client->dev.kobj, &as5512_54x_psu_group); ++exit_free: ++ kfree(data); ++exit: ++ ++ return status; ++} ++ ++static int as5512_54x_psu_remove(struct i2c_client *client) ++{ ++ struct as5512_54x_psu_data *data = i2c_get_clientdata(client); ++ ++ hwmon_device_unregister(data->hwmon_dev); ++ sysfs_remove_group(&client->dev.kobj, &as5512_54x_psu_group); ++ kfree(data); ++ ++ return 0; ++} ++ ++static const struct i2c_device_id as5512_54x_psu_id[] = { ++ { "as5512_54x_psu", 0 }, ++ {} ++}; ++MODULE_DEVICE_TABLE(i2c, as5512_54x_psu_id); ++ ++static struct i2c_driver as5512_54x_psu_driver = { ++ .class = I2C_CLASS_HWMON, ++ .driver = { ++ .name = "as5512_54x_psu", ++ }, ++ .probe = as5512_54x_psu_probe, ++ .remove = as5512_54x_psu_remove, ++ .id_table = as5512_54x_psu_id, ++ .address_list = normal_i2c, ++}; ++ ++static int as5512_54x_psu_read_block(struct i2c_client *client, u8 command, u8 *data, ++ int data_len) ++{ ++ int result = i2c_smbus_read_i2c_block_data(client, command, data_len, data); ++ ++ if (unlikely(result < 0)) ++ goto abort; ++ if (unlikely(result != data_len)) { ++ result = -EIO; ++ goto abort; ++ } ++ ++ result = 0; ++ ++abort: ++ return result; ++} ++ ++static struct as5512_54x_psu_data *as5512_54x_psu_update_device(struct device *dev) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct as5512_54x_psu_data *data = i2c_get_clientdata(client); ++ ++ mutex_lock(&data->update_lock); ++ ++ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) ++ || !data->valid) { ++ int status = -1; ++ ++ dev_dbg(&client->dev, "Starting as5512_54x update\n"); ++ ++ /* Read model name */ ++ if (client->addr == 0x38 || client->addr == 0x3b) { ++ /* AC power */ ++ status = as5512_54x_psu_read_block(client, 0x26, data->model_name, ++ ARRAY_SIZE(data->model_name)-1); ++ } ++ else { ++ /* DC power */ ++ status = as5512_54x_psu_read_block(client, 0x50, data->model_name, ++ ARRAY_SIZE(data->model_name)-1); ++ } ++ ++ if (status < 0) { ++ data->model_name[0] = '\0'; ++ dev_dbg(&client->dev, "unable to read model name from (0x%x)\n", client->addr); ++ } ++ else { ++ data->model_name[ARRAY_SIZE(data->model_name)-1] = '\0'; ++ } ++ ++ /* Read psu status */ ++ status = accton_i2c_cpld_read(0x60, 0x2); ++ ++ if (status < 0) { ++ dev_dbg(&client->dev, "cpld reg 0x60 err %d\n", status); ++ } ++ else { ++ data->status = status; ++ } ++ ++ data->last_updated = jiffies; ++ data->valid = 1; ++ } ++ ++ mutex_unlock(&data->update_lock); ++ ++ return data; ++} ++ ++static int __init as5512_54x_psu_init(void) ++{ ++ extern int platform_accton_as5512_54x(void); ++ if(!platform_accton_as5512_54x()) { ++ return -ENODEV; ++ } ++ ++ return i2c_add_driver(&as5512_54x_psu_driver); ++} ++ ++static void __exit as5512_54x_psu_exit(void) ++{ ++ i2c_del_driver(&as5512_54x_psu_driver); ++} ++ ++MODULE_AUTHOR("Brandon Chuang "); 
++MODULE_DESCRIPTION("accton as5512_54x_psu driver"); ++MODULE_LICENSE("GPL"); ++ ++module_init(as5512_54x_psu_init); ++module_exit(as5512_54x_psu_exit); ++ +diff --git a/drivers/hwmon/accton_i2c_cpld.c b/drivers/hwmon/accton_i2c_cpld.c +index acf88c9..e50c599 100644 +--- a/drivers/hwmon/accton_i2c_cpld.c ++++ b/drivers/hwmon/accton_i2c_cpld.c +@@ -255,6 +255,22 @@ int platform_accton_as5812_54t(void) + } + EXPORT_SYMBOL(platform_accton_as5812_54t); + ++static struct dmi_system_id as5512_54x_dmi_table[] = { ++ { ++ .ident = "Accton AS5512", ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "AS5512"), ++ }, ++ } ++}; ++ ++int platform_accton_as5512_54x(void) ++{ ++ return dmi_check_system(as5512_54x_dmi_table); ++} ++EXPORT_SYMBOL(platform_accton_as5512_54x); ++ + MODULE_AUTHOR("Brandon Chuang "); + MODULE_DESCRIPTION("accton_i2c_cpld driver"); + MODULE_LICENSE("GPL"); +diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig +index 599b97b..9ba4a1b 100644 +--- a/drivers/leds/Kconfig ++++ b/drivers/leds/Kconfig +@@ -88,7 +88,14 @@ config LEDS_ACCTON_AS5812_54t + help + This option enables support for the LEDs on the Accton as5812 54t. + Say Y to enable LEDs on the Accton as5812 54t. +- ++ ++config LEDS_ACCTON_AS5512_54X ++ tristate "LED support for the Accton as5512 54x" ++ depends on LEDS_CLASS && SENSORS_ACCTON_I2C_CPLD ++ help ++ This option enables support for the LEDs on the Accton as5512 54x. ++ Say Y to enable LEDs on the Accton as5512 54x. ++ + config LEDS_LM3530 + tristate "LCD Backlight driver for LM3530" + depends on LEDS_CLASS +diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile +index bd20baa..ff3be6c 100644 +--- a/drivers/leds/Makefile ++++ b/drivers/leds/Makefile +@@ -50,6 +50,7 @@ obj-$(CONFIG_LEDS_ACCTON_AS7712_32x) += leds-accton_as7712_32x.o + obj-$(CONFIG_LEDS_ACCTON_AS5812_54x) += leds-accton_as5812_54x.o + obj-$(CONFIG_LEDS_ACCTON_AS6812_32x) += leds-accton_as6812_32x.o + obj-$(CONFIG_LEDS_ACCTON_AS5812_54t) += leds-accton_as5812_54t.o ++obj-$(CONFIG_LEDS_ACCTON_AS5512_54X) += leds-accton_as5512_54x.o + + # LED SPI Drivers + obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o +diff --git a/drivers/leds/leds-accton_as5512_54x.c b/drivers/leds/leds-accton_as5512_54x.c +new file mode 100644 +index 0000000..761483a +--- /dev/null ++++ b/drivers/leds/leds-accton_as5512_54x.c +@@ -0,0 +1,463 @@ ++/* ++ * A LED driver for the accton_as5512_54x_led ++ * ++ * Copyright (C) 2015 Accton Technology Corporation. ++ * Brandon Chuang ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
++ */ ++ ++/*#define DEBUG*/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); ++extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); ++ ++extern void led_classdev_unregister(struct led_classdev *led_cdev); ++extern int led_classdev_register(struct device *parent, struct led_classdev *led_cdev); ++extern void led_classdev_resume(struct led_classdev *led_cdev); ++extern void led_classdev_suspend(struct led_classdev *led_cdev); ++ ++#define DRVNAME "as5512_54x_led" ++ ++struct accton_as5512_54x_led_data { ++ struct platform_device *pdev; ++ struct mutex update_lock; ++ char valid; /* != 0 if registers are valid */ ++ unsigned long last_updated; /* In jiffies */ ++ u8 reg_val[2]; /* Register value, 0 = LOC/DIAG/FAN LED ++ 1 = PSU1/PSU2 LED */ ++}; ++ ++static struct accton_as5512_54x_led_data *ledctl = NULL; ++ ++/* LED related data ++ */ ++#define LED_TYPE_PSU1_REG_MASK 0x03 ++#define LED_MODE_PSU1_GREEN_MASK 0x02 ++#define LED_MODE_PSU1_AMBER_MASK 0x01 ++#define LED_MODE_PSU1_OFF_MASK 0x03 ++#define LED_MODE_PSU1_AUTO_MASK 0x00 ++ ++#define LED_TYPE_PSU2_REG_MASK 0x0C ++#define LED_MODE_PSU2_GREEN_MASK 0x08 ++#define LED_MODE_PSU2_AMBER_MASK 0x04 ++#define LED_MODE_PSU2_OFF_MASK 0x0C ++#define LED_MODE_PSU2_AUTO_MASK 0x00 ++ ++#define LED_TYPE_DIAG_REG_MASK 0x0C ++#define LED_MODE_DIAG_GREEN_MASK 0x08 ++#define LED_MODE_DIAG_AMBER_MASK 0x04 ++#define LED_MODE_DIAG_OFF_MASK 0x0C ++ ++#define LED_TYPE_FAN_REG_MASK 0x03 ++#define LED_MODE_FAN_GREEN_MASK 0x02 ++#define LED_MODE_FAN_AMBER_MASK 0x01 ++#define LED_MODE_FAN_OFF_MASK 0x03 ++#define LED_MODE_FAN_AUTO_MASK 0x00 ++ ++#define LED_TYPE_LOC_REG_MASK 0x30 ++#define LED_MODE_LOC_ON_MASK 0x00 ++#define LED_MODE_LOC_OFF_MASK 0x10 ++#define LED_MODE_LOC_BLINK_MASK 0x20 ++ ++static const u8 led_reg[] = { ++ 0xA, /* LOC/DIAG/FAN LED*/ ++ 0xB, /* PSU1/PSU2 LED */ ++}; ++ ++enum led_type { ++ LED_TYPE_PSU1, ++ LED_TYPE_PSU2, ++ LED_TYPE_DIAG, ++ LED_TYPE_FAN, ++ LED_TYPE_LOC ++}; ++ ++enum led_light_mode { ++ LED_MODE_OFF = 0, ++ LED_MODE_GREEN, ++ LED_MODE_GREEN_BLINK, ++ LED_MODE_AMBER, ++ LED_MODE_AMBER_BLINK, ++ LED_MODE_RED, ++ LED_MODE_RED_BLINK, ++ LED_MODE_BLUE, ++ LED_MODE_BLUE_BLINK, ++ LED_MODE_AUTO, ++ LED_MODE_UNKNOWN ++}; ++ ++struct led_type_mode { ++ enum led_type type; ++ int type_mask; ++ enum led_light_mode mode; ++ int mode_mask; ++}; ++ ++static struct led_type_mode led_type_mode_data[] = { ++{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU1_GREEN_MASK}, ++{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU1_AMBER_MASK}, ++{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU1_AUTO_MASK}, ++{LED_TYPE_PSU1, LED_TYPE_PSU1_REG_MASK, LED_MODE_OFF, LED_MODE_PSU1_OFF_MASK}, ++{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_GREEN, LED_MODE_PSU2_GREEN_MASK}, ++{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AMBER, LED_MODE_PSU2_AMBER_MASK}, ++{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_AUTO, LED_MODE_PSU2_AUTO_MASK}, ++{LED_TYPE_PSU2, LED_TYPE_PSU2_REG_MASK, LED_MODE_OFF, LED_MODE_PSU2_OFF_MASK}, ++{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_GREEN, LED_MODE_FAN_GREEN_MASK}, ++{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AMBER, LED_MODE_FAN_AMBER_MASK}, ++{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_AUTO, LED_MODE_FAN_AUTO_MASK}, ++{LED_TYPE_FAN, LED_TYPE_FAN_REG_MASK, LED_MODE_OFF, LED_MODE_FAN_OFF_MASK}, ++{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, 
LED_MODE_GREEN, LED_MODE_DIAG_GREEN_MASK}, ++{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_AMBER, LED_MODE_DIAG_AMBER_MASK}, ++{LED_TYPE_DIAG, LED_TYPE_DIAG_REG_MASK, LED_MODE_OFF, LED_MODE_DIAG_OFF_MASK}, ++{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER, LED_MODE_LOC_ON_MASK}, ++{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_OFF, LED_MODE_LOC_OFF_MASK}, ++{LED_TYPE_LOC, LED_TYPE_LOC_REG_MASK, LED_MODE_AMBER_BLINK, LED_MODE_LOC_BLINK_MASK} ++}; ++ ++static int led_reg_val_to_light_mode(enum led_type type, u8 reg_val) { ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { ++ ++ if (type != led_type_mode_data[i].type) ++ continue; ++ ++ if ((led_type_mode_data[i].type_mask & reg_val) == ++ led_type_mode_data[i].mode_mask) ++ { ++ return led_type_mode_data[i].mode; ++ } ++ } ++ ++ return 0; ++} ++ ++static u8 led_light_mode_to_reg_val(enum led_type type, ++ enum led_light_mode mode, u8 reg_val) { ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(led_type_mode_data); i++) { ++ if (type != led_type_mode_data[i].type) ++ continue; ++ ++ if (mode != led_type_mode_data[i].mode) ++ continue; ++ ++ reg_val = led_type_mode_data[i].mode_mask | ++ (reg_val & (~led_type_mode_data[i].type_mask)); ++ } ++ ++ return reg_val; ++} ++ ++static int accton_as5512_54x_led_read_value(u8 reg) ++{ ++ return accton_i2c_cpld_read(0x60, reg); ++} ++ ++static int accton_as5512_54x_led_write_value(u8 reg, u8 value) ++{ ++ return accton_i2c_cpld_write(0x60, reg, value); ++} ++ ++static void accton_as5512_54x_led_update(void) ++{ ++ mutex_lock(&ledctl->update_lock); ++ ++ if (time_after(jiffies, ledctl->last_updated + HZ + HZ / 2) ++ || !ledctl->valid) { ++ int i; ++ ++ dev_dbg(&ledctl->pdev->dev, "Starting accton_as5512_54x_led update\n"); ++ ++ /* Update LED data ++ */ ++ for (i = 0; i < ARRAY_SIZE(ledctl->reg_val); i++) { ++ int status = accton_as5512_54x_led_read_value(led_reg[i]); ++ ++ if (status < 0) { ++ ledctl->valid = 0; ++ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", led_reg[i], status); ++ goto exit; ++ } ++ else ++ { ++ ledctl->reg_val[i] = status; ++ } ++ } ++ ++ ledctl->last_updated = jiffies; ++ ledctl->valid = 1; ++ } ++ ++exit: ++ mutex_unlock(&ledctl->update_lock); ++} ++ ++static void accton_as5512_54x_led_set(struct led_classdev *led_cdev, ++ enum led_brightness led_light_mode, ++ u8 reg, enum led_type type) ++{ ++ int reg_val; ++ ++ mutex_lock(&ledctl->update_lock); ++ ++ reg_val = accton_as5512_54x_led_read_value(reg); ++ ++ if (reg_val < 0) { ++ dev_dbg(&ledctl->pdev->dev, "reg %d, err %d\n", reg, reg_val); ++ goto exit; ++ } ++ ++ reg_val = led_light_mode_to_reg_val(type, led_light_mode, reg_val); ++ accton_as5512_54x_led_write_value(reg, reg_val); ++ ++ /* to prevent the slow-update issue */ ++ ledctl->valid = 0; ++ ++exit: ++ mutex_unlock(&ledctl->update_lock); ++} ++ ++static void accton_as5512_54x_led_psu_1_set(struct led_classdev *led_cdev, ++ enum led_brightness led_light_mode) ++{ ++ accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU1); ++} ++ ++static enum led_brightness accton_as5512_54x_led_psu_1_get(struct led_classdev *cdev) ++{ ++ accton_as5512_54x_led_update(); ++ return led_reg_val_to_light_mode(LED_TYPE_PSU1, ledctl->reg_val[1]); ++} ++ ++static void accton_as5512_54x_led_psu_2_set(struct led_classdev *led_cdev, ++ enum led_brightness led_light_mode) ++{ ++ accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[1], LED_TYPE_PSU2); ++} ++ ++static enum led_brightness accton_as5512_54x_led_psu_2_get(struct led_classdev *cdev) ++{ 
++ accton_as5512_54x_led_update(); ++ return led_reg_val_to_light_mode(LED_TYPE_PSU2, ledctl->reg_val[1]); ++} ++ ++static void accton_as5512_54x_led_fan_set(struct led_classdev *led_cdev, ++ enum led_brightness led_light_mode) ++{ ++ accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_FAN); ++} ++ ++static enum led_brightness accton_as5512_54x_led_fan_get(struct led_classdev *cdev) ++{ ++ accton_as5512_54x_led_update(); ++ return led_reg_val_to_light_mode(LED_TYPE_FAN, ledctl->reg_val[0]); ++} ++ ++static void accton_as5512_54x_led_diag_set(struct led_classdev *led_cdev, ++ enum led_brightness led_light_mode) ++{ ++ accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_DIAG); ++} ++ ++static enum led_brightness accton_as5512_54x_led_diag_get(struct led_classdev *cdev) ++{ ++ accton_as5512_54x_led_update(); ++ return led_reg_val_to_light_mode(LED_TYPE_DIAG, ledctl->reg_val[0]); ++} ++ ++static void accton_as5512_54x_led_loc_set(struct led_classdev *led_cdev, ++ enum led_brightness led_light_mode) ++{ ++ accton_as5512_54x_led_set(led_cdev, led_light_mode, led_reg[0], LED_TYPE_LOC); ++} ++ ++static enum led_brightness accton_as5512_54x_led_loc_get(struct led_classdev *cdev) ++{ ++ accton_as5512_54x_led_update(); ++ return led_reg_val_to_light_mode(LED_TYPE_LOC, ledctl->reg_val[0]); ++} ++ ++static struct led_classdev accton_as5512_54x_leds[] = { ++ [LED_TYPE_PSU1] = { ++ .name = "accton_as5512_54x_led::psu1", ++ .default_trigger = "unused", ++ .brightness_set = accton_as5512_54x_led_psu_1_set, ++ .brightness_get = accton_as5512_54x_led_psu_1_get, ++ .flags = LED_CORE_SUSPENDRESUME, ++ .max_brightness = LED_MODE_AUTO, ++ }, ++ [LED_TYPE_PSU2] = { ++ .name = "accton_as5512_54x_led::psu2", ++ .default_trigger = "unused", ++ .brightness_set = accton_as5512_54x_led_psu_2_set, ++ .brightness_get = accton_as5512_54x_led_psu_2_get, ++ .flags = LED_CORE_SUSPENDRESUME, ++ .max_brightness = LED_MODE_AUTO, ++ }, ++ [LED_TYPE_FAN] = { ++ .name = "accton_as5512_54x_led::fan", ++ .default_trigger = "unused", ++ .brightness_set = accton_as5512_54x_led_fan_set, ++ .brightness_get = accton_as5512_54x_led_fan_get, ++ .flags = LED_CORE_SUSPENDRESUME, ++ .max_brightness = LED_MODE_AUTO, ++ }, ++ [LED_TYPE_DIAG] = { ++ .name = "accton_as5512_54x_led::diag", ++ .default_trigger = "unused", ++ .brightness_set = accton_as5512_54x_led_diag_set, ++ .brightness_get = accton_as5512_54x_led_diag_get, ++ .flags = LED_CORE_SUSPENDRESUME, ++ .max_brightness = LED_MODE_AUTO, ++ }, ++ [LED_TYPE_LOC] = { ++ .name = "accton_as5512_54x_led::loc", ++ .default_trigger = "unused", ++ .brightness_set = accton_as5512_54x_led_loc_set, ++ .brightness_get = accton_as5512_54x_led_loc_get, ++ .flags = LED_CORE_SUSPENDRESUME, ++ .max_brightness = LED_MODE_AUTO, ++ }, ++}; ++ ++static int accton_as5512_54x_led_suspend(struct platform_device *dev, ++ pm_message_t state) ++{ ++ int i = 0; ++ ++ for (i = 0; i < ARRAY_SIZE(accton_as5512_54x_leds); i++) { ++ led_classdev_suspend(&accton_as5512_54x_leds[i]); ++ } ++ ++ return 0; ++} ++ ++static int accton_as5512_54x_led_resume(struct platform_device *dev) ++{ ++ int i = 0; ++ ++ for (i = 0; i < ARRAY_SIZE(accton_as5512_54x_leds); i++) { ++ led_classdev_resume(&accton_as5512_54x_leds[i]); ++ } ++ ++ return 0; ++} ++ ++static int accton_as5512_54x_led_probe(struct platform_device *pdev) ++{ ++ int ret, i; ++ ++ for (i = 0; i < ARRAY_SIZE(accton_as5512_54x_leds); i++) { ++ ret = led_classdev_register(&pdev->dev, &accton_as5512_54x_leds[i]); ++ ++ if (ret < 0) 
++ break;
++ }
++
++ /* Check if all LEDs were successfully registered */
++ if (i != ARRAY_SIZE(accton_as5512_54x_leds)){
++ int j;
++
++ /* only unregister the LEDs that were successfully registered */
++ for (j = 0; j < i; j++) {
++ led_classdev_unregister(&accton_as5512_54x_leds[j]);
++ }
++ }
++
++ return ret;
++}
++
++static int accton_as5512_54x_led_remove(struct platform_device *pdev)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(accton_as5512_54x_leds); i++) {
++ led_classdev_unregister(&accton_as5512_54x_leds[i]);
++ }
++
++ return 0;
++}
++
++static struct platform_driver accton_as5512_54x_led_driver = {
++ .probe = accton_as5512_54x_led_probe,
++ .remove = accton_as5512_54x_led_remove,
++ .suspend = accton_as5512_54x_led_suspend,
++ .resume = accton_as5512_54x_led_resume,
++ .driver = {
++ .name = DRVNAME,
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init accton_as5512_54x_led_init(void)
++{
++ int ret;
++
++ extern int platform_accton_as5512_54x(void);
++ if(!platform_accton_as5512_54x()) {
++ return -ENODEV;
++ }
++
++ ret = platform_driver_register(&accton_as5512_54x_led_driver);
++ if (ret < 0) {
++ goto exit;
++ }
++
++ ledctl = kzalloc(sizeof(struct accton_as5512_54x_led_data), GFP_KERNEL);
++ if (!ledctl) {
++ ret = -ENOMEM;
++ platform_driver_unregister(&accton_as5512_54x_led_driver);
++ goto exit;
++ }
++
++ mutex_init(&ledctl->update_lock);
++
++ ledctl->pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0);
++ if (IS_ERR(ledctl->pdev)) {
++ ret = PTR_ERR(ledctl->pdev);
++ platform_driver_unregister(&accton_as5512_54x_led_driver);
++ kfree(ledctl);
++ goto exit;
++ }
++
++exit:
++ return ret;
++}
++
++static void __exit accton_as5512_54x_led_exit(void)
++{
++ platform_device_unregister(ledctl->pdev);
++ platform_driver_unregister(&accton_as5512_54x_led_driver);
++ kfree(ledctl);
++}
++
++module_init(accton_as5512_54x_led_init);
++module_exit(accton_as5512_54x_led_exit);
++
++MODULE_AUTHOR("Brandon Chuang ");
++MODULE_DESCRIPTION("accton_as5512_54x_led driver");
++MODULE_LICENSE("GPL");
++
+diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
+index c75227b..d90ebe2 100644
+--- a/drivers/misc/eeprom/Kconfig
++++ b/drivers/misc/eeprom/Kconfig
+@@ -135,7 +135,16 @@ config EEPROM_ACCTON_AS5812_54t_SFP
+
+ This driver can also be built as a module. If so, the module will
+ be called accton_as5812_54t_sfp.
+-
++
+config EEPROM_ACCTON_AS5512_54X_SFP
+ tristate "Accton as5512_54x sfp"
+ depends on I2C && SENSORS_ACCTON_I2C_CPLD
+ help
+ If you say yes here you get support for Accton as5512_54x sfp.
+
+ This driver can also be built as a module. If so, the module will
+ be called accton_as5512_54x_sfp.
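The CPLD behind this driver packs each front-panel LED into a small bit field of one register byte: type_mask selects the field and mode_mask holds the value that encodes a colour, so a mode change is a read-modify-write that must leave the neighbouring LED fields untouched. Below is a minimal userspace sketch of the same mask arithmetic, reusing the PSU mask values from the driver above; the CPLD register is stood in by a plain variable, so the sketch illustrates only the encoding, not real hardware access.

    #include <stdio.h>

    /* Field masks copied from the driver above: PSU1 occupies bits [1:0]
     * and PSU2 bits [3:2] of CPLD register 0xB. */
    #define LED_TYPE_PSU1_REG_MASK   0x03
    #define LED_MODE_PSU1_GREEN_MASK 0x02
    #define LED_TYPE_PSU2_REG_MASK   0x0C
    #define LED_MODE_PSU2_AMBER_MASK 0x04

    /* Read-modify-write one LED field, keeping all other fields intact
     * (the same expression led_light_mode_to_reg_val() applies). */
    static unsigned char set_field(unsigned char reg_val, int type_mask,
                                   int mode_mask)
    {
            return (unsigned char)(mode_mask | (reg_val & ~type_mask));
    }

    int main(void)
    {
            unsigned char reg = 0x00; /* stand-in for CPLD register 0xB */

            reg = set_field(reg, LED_TYPE_PSU1_REG_MASK, LED_MODE_PSU1_GREEN_MASK);
            reg = set_field(reg, LED_TYPE_PSU2_REG_MASK, LED_MODE_PSU2_AMBER_MASK);
            printf("reg = 0x%02x\n", reg); /* prints 0x06: PSU1 green, PSU2 amber */
            return 0;
    }

Driving both the set and get paths from the single led_type_mode_data table keeps the per-LED callbacks down to one line each, which is why the driver above can register five classdevs with no duplicated mask logic.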
++
+ config EEPROM_93CX6
+ tristate "EEPROM 93CX6 support"
+ help
+diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
+index 152a8bc..907f836 100644
+--- a/drivers/misc/eeprom/Makefile
++++ b/drivers/misc/eeprom/Makefile
+@@ -13,4 +13,5 @@ obj-$(CONFIG_EEPROM_ACCTON_AS7712_32x_SFP) += accton_as7712_32x_sfp.o
+ obj-$(CONFIG_EEPROM_ACCTON_AS5812_54x_SFP) += accton_as5812_54x_sfp.o
+ obj-$(CONFIG_EEPROM_ACCTON_AS6812_32x_SFP) += accton_as6812_32x_sfp.o
+ obj-$(CONFIG_EEPROM_ACCTON_AS5812_54t_SFP) += accton_as5812_54t_sfp.o
++obj-$(CONFIG_EEPROM_ACCTON_AS5512_54X_SFP) += accton_as5512_54x_sfp.o
+ obj-$(CONFIG_EEPROM_SFF_8436) += sff_8436_eeprom.o
+diff --git a/drivers/misc/eeprom/accton_as5512_54x_sfp.c b/drivers/misc/eeprom/accton_as5512_54x_sfp.c
+new file mode 100644
+index 0000000..d89e71d
+--- /dev/null
++++ b/drivers/misc/eeprom/accton_as5512_54x_sfp.c
+@@ -0,0 +1,1237 @@
++/*
++ * SFP driver for accton as5512_54x sfp
++ *
++ * Copyright (C) Brandon Chuang
++ *
++ * Based on ad7414.c
++ * Copyright 2006 Stefan Roese , DENX Software Engineering
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/jiffies.h>
++#include <linux/i2c.h>
++#include <linux/hwmon.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/err.h>
++#include <linux/mutex.h>
++#include <linux/sysfs.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++
++#define DRIVER_NAME "as5512_54x_sfp"
++
++#define DEBUG_MODE 0
++
++#if (DEBUG_MODE == 1)
++ #define DEBUG_PRINT(fmt, args...) \
++ printk (KERN_INFO "%s:%s[%d]: " fmt "\r\n", __FILE__, __FUNCTION__, __LINE__, ##args)
++#else
++ #define DEBUG_PRINT(fmt, args...)
++#endif ++ ++#define NUM_OF_SFP_PORT 54 ++#define EEPROM_NAME "sfp_eeprom" ++#define EEPROM_SIZE 256 /* 256 byte eeprom */ ++#define BIT_INDEX(i) (1ULL << (i)) ++#define USE_I2C_BLOCK_READ 1 ++#define I2C_RW_RETRY_COUNT 3 ++#define I2C_RW_RETRY_INTERVAL 100 /* ms */ ++ ++#define SFP_EEPROM_A0_I2C_ADDR (0xA0 >> 1) ++#define SFP_EEPROM_A2_I2C_ADDR (0xA2 >> 1) ++ ++#define SFF8024_PHYSICAL_DEVICE_ID_ADDR 0x0 ++#define SFF8024_DEVICE_ID_SFP 0x3 ++#define SFF8024_DEVICE_ID_QSFP 0xC ++#define SFF8024_DEVICE_ID_QSFP_PLUS 0xD ++#define SFF8024_DEVICE_ID_QSFP28 0x11 ++ ++#define SFF8472_DIAG_MON_TYPE_ADDR 92 ++#define SFF8472_DIAG_MON_TYPE_DDM_MASK 0x40 ++#define SFF8472_10G_ETH_COMPLIANCE_ADDR 0x3 ++#define SFF8472_10G_BASE_MASK 0xF0 ++ ++#define SFF8436_RX_LOS_ADDR 3 ++#define SFF8436_TX_FAULT_ADDR 4 ++#define SFF8436_TX_DISABLE_ADDR 86 ++ ++static ssize_t sfp_eeprom_read(struct i2c_client *, u8, u8 *,int); ++static ssize_t sfp_eeprom_write(struct i2c_client *, u8 , const char *,int); ++extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg); ++extern int accton_i2c_cpld_write(unsigned short cpld_addr, u8 reg, u8 value); ++ ++/* Addresses scanned ++ */ ++static const unsigned short normal_i2c[] = { SFP_EEPROM_A0_I2C_ADDR, SFP_EEPROM_A2_I2C_ADDR, I2C_CLIENT_END }; ++ ++#define CPLD_PORT_TO_FRONT_PORT(port) (port+1) ++ ++enum port_numbers { ++sfp1, sfp2, sfp3, sfp4, sfp5, sfp6, sfp7, sfp8, ++sfp9, sfp10, sfp11, sfp12, sfp13, sfp14, sfp15, sfp16, ++sfp17, sfp18, sfp19, sfp20, sfp21, sfp22, sfp23, sfp24, ++sfp25, sfp26, sfp27, sfp28, sfp29, sfp30, sfp31, sfp32, ++sfp33, sfp34, sfp35, sfp36, sfp37, sfp38, sfp39, sfp40, ++sfp41, sfp42, sfp43, sfp44, sfp45, sfp46, sfp47, sfp48, ++sfp49, sfp50, sfp51, sfp52, sfp53, sfp54 ++}; ++ ++static const struct i2c_device_id sfp_device_id[] = { ++{ "sfp1", sfp1 }, { "sfp2", sfp2 }, { "sfp3", sfp3 }, { "sfp4", sfp4 }, ++{ "sfp5", sfp5 }, { "sfp6", sfp6 }, { "sfp7", sfp7 }, { "sfp8", sfp8 }, ++{ "sfp9", sfp9 }, { "sfp10", sfp10 }, { "sfp11", sfp11 }, { "sfp12", sfp12 }, ++{ "sfp13", sfp13 }, { "sfp14", sfp14 }, { "sfp15", sfp15 }, { "sfp16", sfp16 }, ++{ "sfp17", sfp17 }, { "sfp18", sfp18 }, { "sfp19", sfp19 }, { "sfp20", sfp20 }, ++{ "sfp21", sfp21 }, { "sfp22", sfp22 }, { "sfp23", sfp23 }, { "sfp24", sfp24 }, ++{ "sfp25", sfp25 }, { "sfp26", sfp26 }, { "sfp27", sfp27 }, { "sfp28", sfp28 }, ++{ "sfp29", sfp29 }, { "sfp30", sfp30 }, { "sfp31", sfp31 }, { "sfp32", sfp32 }, ++{ "sfp33", sfp33 }, { "sfp34", sfp34 }, { "sfp35", sfp35 }, { "sfp36", sfp36 }, ++{ "sfp37", sfp37 }, { "sfp38", sfp38 }, { "sfp39", sfp39 }, { "sfp40", sfp40 }, ++{ "sfp41", sfp41 }, { "sfp42", sfp42 }, { "sfp43", sfp43 }, { "sfp44", sfp44 }, ++{ "sfp45", sfp45 }, { "sfp46", sfp46 }, { "sfp47", sfp47 }, { "sfp48", sfp48 }, ++{ "sfp49", sfp49 }, { "sfp50", sfp50 }, { "sfp51", sfp51 }, { "sfp52", sfp52 }, ++{ "sfp53", sfp53 }, { "sfp54", sfp54 }, ++{ /* LIST END */ } ++}; ++MODULE_DEVICE_TABLE(i2c, sfp_device_id); ++ ++/* ++ * list of valid port types ++ * note OOM_PORT_TYPE_NOT_PRESENT to indicate no ++ * module is present in this port ++ */ ++typedef enum oom_driver_port_type_e { ++ OOM_DRIVER_PORT_TYPE_INVALID, ++ OOM_DRIVER_PORT_TYPE_NOT_PRESENT, ++ OOM_DRIVER_PORT_TYPE_SFP, ++ OOM_DRIVER_PORT_TYPE_SFP_PLUS, ++ OOM_DRIVER_PORT_TYPE_QSFP, ++ OOM_DRIVER_PORT_TYPE_QSFP_PLUS, ++ OOM_DRIVER_PORT_TYPE_QSFP28 ++} oom_driver_port_type_t; ++ ++enum driver_type_e { ++ DRIVER_TYPE_SFP_MSA, ++ DRIVER_TYPE_SFP_DDM, ++ DRIVER_TYPE_QSFP ++}; ++ ++/* Each client has this additional data ++ */ 
++struct eeprom_data { ++ char valid; /* !=0 if registers are valid */ ++ unsigned long last_updated; /* In jiffies */ ++ struct bin_attribute bin; /* eeprom data */ ++}; ++ ++struct sfp_msa_data { ++ char valid; /* !=0 if registers are valid */ ++ unsigned long last_updated; /* In jiffies */ ++ u64 status[6]; /* bit0:port0, bit1:port1 and so on */ ++ /* index 0 => tx_fail ++ 1 => tx_disable ++ 2 => rx_loss ++ 3 => device id ++ 4 => 10G Ethernet Compliance Codes ++ to distinguish SFP or SFP+ ++ 5 => DIAGNOSTIC MONITORING TYPE */ ++ struct eeprom_data eeprom; ++}; ++ ++struct sfp_ddm_data { ++ struct eeprom_data eeprom; ++}; ++ ++struct qsfp_data { ++ char valid; /* !=0 if registers are valid */ ++ unsigned long last_updated; /* In jiffies */ ++ u8 status[3]; /* bit0:port0, bit1:port1 and so on */ ++ /* index 0 => tx_fail ++ 1 => tx_disable ++ 2 => rx_loss */ ++ ++ u8 device_id; ++ struct eeprom_data eeprom; ++}; ++ ++struct sfp_port_data { ++ struct mutex update_lock; ++ enum driver_type_e driver_type; ++ int port; /* CPLD port index */ ++ oom_driver_port_type_t port_type; ++ u64 present; /* present status, bit0:port0, bit1:port1 and so on */ ++ ++ struct sfp_msa_data *msa; ++ struct sfp_ddm_data *ddm; ++ struct qsfp_data *qsfp; ++ ++ struct i2c_client *client; ++}; ++ ++enum sfp_sysfs_attributes { ++ PRESENT, ++ PRESENT_ALL, ++ PORT_NUMBER, ++ PORT_TYPE, ++ DDM_IMPLEMENTED, ++ TX_FAULT, ++ TX_FAULT1, ++ TX_FAULT2, ++ TX_FAULT3, ++ TX_FAULT4, ++ TX_DISABLE, ++ TX_DISABLE1, ++ TX_DISABLE2, ++ TX_DISABLE3, ++ TX_DISABLE4, ++ RX_LOS, ++ RX_LOS1, ++ RX_LOS2, ++ RX_LOS3, ++ RX_LOS4, ++ RX_LOS_ALL ++}; ++ ++static ssize_t show_port_number(struct device *dev, struct device_attribute *da, ++ char *buf) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct sfp_port_data *data = i2c_get_clientdata(client); ++ return sprintf(buf, "%d\n", CPLD_PORT_TO_FRONT_PORT(data->port)); ++} ++ ++static struct sfp_port_data* sfp_update_present(struct i2c_client *client) ++{ ++ int i = 0, j = 0, status = -1; ++ u8 reg; ++ unsigned short cpld_addr; ++ struct sfp_port_data *data = i2c_get_clientdata(client); ++ ++ DEBUG_PRINT("Starting sfp present status update"); ++ mutex_lock(&data->update_lock); ++ data->present = 0; ++ ++ /* Read present status of port 1~48(SFP port) */ ++ for (i = 0; i < 2; i++) { ++ for (j = 0; j < 3; j++) { ++ cpld_addr = 0x61+i; ++ reg = 0x6+j; ++ status = accton_i2c_cpld_read(cpld_addr, reg); ++ ++ if (unlikely(status < 0)) { ++ data = ERR_PTR(status); ++ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", cpld_addr, reg, status); ++ goto exit; ++ } ++ ++ DEBUG_PRINT("Present status = 0x%lx\r\n", data->present); ++ data->present |= (u64)status << ((i*24) + (j%3)*8); ++ } ++ } ++ ++ /* Read present status of port 49-54(QSFP port) */ ++ cpld_addr = 0x62; ++ reg = 0x14; ++ status = accton_i2c_cpld_read(cpld_addr, reg); ++ ++ if (unlikely(status < 0)) { ++ data = ERR_PTR(status); ++ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", cpld_addr, reg, status); ++ goto exit; ++ } ++ else { ++ data->present |= (u64)status << 48; ++ } ++ ++ DEBUG_PRINT("Present status = 0x%lx", data->present); ++exit: ++ mutex_unlock(&data->update_lock); ++ return data; ++} ++ ++static struct sfp_port_data* sfp_update_tx_rx_status(struct device *dev) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct sfp_port_data *data = i2c_get_clientdata(client); ++ int i = 0, j = 0; ++ int status = -1; ++ ++ if (time_before(jiffies, data->msa->last_updated + HZ + HZ / 2) && data->msa->valid) { ++ 
return data; ++ } ++ ++ DEBUG_PRINT("Starting as5512_54x sfp tx rx status update"); ++ mutex_lock(&data->update_lock); ++ data->msa->valid = 0; ++ memset(data->msa->status, 0, sizeof(data->msa->status)); ++ ++ /* Read status of port 1~48(SFP port) */ ++ for (i = 0; i < 2; i++) { ++ for (j = 0; j < 9; j++) { ++ u8 reg; ++ unsigned short cpld_addr; ++ reg = 0x9+j; ++ cpld_addr = 0x61+i; ++ ++ status = accton_i2c_cpld_read(cpld_addr, reg); ++ if (unlikely(status < 0)) { ++ data = ERR_PTR(status); ++ dev_dbg(&client->dev, "cpld(0x%x) reg(0x%x) err %d\n", cpld_addr, reg, status); ++ goto exit; ++ } ++ ++ data->msa->status[j/3] |= (u64)status << ((i*24) + (j%3)*8); ++ } ++ } ++ ++ data->msa->valid = 1; ++ data->msa->last_updated = jiffies; ++ ++exit: ++ mutex_unlock(&data->update_lock); ++ return data; ++} ++ ++static ssize_t sfp_set_tx_disable(struct device *dev, struct device_attribute *da, ++ const char *buf, size_t count) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct sfp_port_data *data = i2c_get_clientdata(client); ++ unsigned short cpld_addr = 0; ++ u8 cpld_reg = 0, cpld_val = 0, cpld_bit = 0; ++ long disable; ++ int error; ++ ++ error = kstrtol(buf, 10, &disable); ++ if (error) { ++ return error; ++ } ++ ++ mutex_lock(&data->update_lock); ++ ++ if(data->port < 24) { ++ cpld_addr = 0x61; ++ cpld_reg = 0xC + data->port / 8; ++ cpld_bit = 1 << (data->port % 8); ++ } ++ else { /* port 24 ~ 48 */ ++ cpld_addr = 0x62; ++ cpld_reg = 0xC + (data->port - 24) / 8; ++ cpld_bit = 1 << (data->port % 8); ++ } ++ ++ /* Read current status */ ++ cpld_val = accton_i2c_cpld_read(cpld_addr, cpld_reg); ++ ++ /* Update tx_disable status */ ++ if (disable) { ++ data->msa->status[1] |= BIT_INDEX(data->port); ++ cpld_val |= cpld_bit; ++ } ++ else { ++ data->msa->status[1] &= ~BIT_INDEX(data->port); ++ cpld_val &= ~cpld_bit; ++ } ++ ++ accton_i2c_cpld_write(cpld_addr, cpld_reg, cpld_val); ++ mutex_unlock(&data->update_lock); ++ return count; ++} ++ ++static int sfp_is_port_present(struct i2c_client *client, int port) ++{ ++ struct sfp_port_data *data = i2c_get_clientdata(client); ++ ++ data = sfp_update_present(client); ++ if (IS_ERR(data)) { ++ return PTR_ERR(data); ++ } ++ ++ return (data->present & BIT_INDEX(data->port)) ? 
0 : 1; ++} ++ ++static ssize_t show_present(struct device *dev, struct device_attribute *da, ++ char *buf) ++{ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); ++ struct i2c_client *client = to_i2c_client(dev); ++ ++ if (PRESENT_ALL == attr->index) { ++ int i; ++ u8 values[7] = {0}; ++ struct sfp_port_data *data = sfp_update_present(client); ++ ++ if (IS_ERR(data)) { ++ return PTR_ERR(data); ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(values); i++) { ++ values[i] = ~(u8)(data->present >> (i * 8)); ++ } ++ ++ /* Return values 1 -> 54 in order */ ++ return sprintf(buf, "%.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", ++ values[0], values[1], values[2], ++ values[3], values[4], values[5], ++ values[6] & 0x3F); ++ } ++ else { ++ struct sfp_port_data *data = i2c_get_clientdata(client); ++ int present = sfp_is_port_present(client, data->port); ++ ++ if (IS_ERR_VALUE(present)) { ++ return present; ++ } ++ ++ /* PRESENT */ ++ return sprintf(buf, "%d\n", present); ++ } ++} ++ ++static struct sfp_port_data *sfp_update_port_type(struct device *dev) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct sfp_port_data *data = i2c_get_clientdata(client); ++ u8 buf = 0; ++ int status; ++ ++ mutex_lock(&data->update_lock); ++ ++ switch (data->driver_type) { ++ case DRIVER_TYPE_SFP_MSA: ++ { ++ status = sfp_eeprom_read(client, SFF8024_PHYSICAL_DEVICE_ID_ADDR, &buf, sizeof(buf)); ++ if (unlikely(status < 0)) { ++ data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; ++ break; ++ } ++ ++ if (buf != SFF8024_DEVICE_ID_SFP) { ++ data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; ++ break; ++ } ++ ++ status = sfp_eeprom_read(client, SFF8472_10G_ETH_COMPLIANCE_ADDR, &buf, sizeof(buf)); ++ if (unlikely(status < 0)) { ++ data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; ++ break; ++ } ++ ++ DEBUG_PRINT("sfp port type (0x3) data = (0x%x)", buf); ++ data->port_type = buf & SFF8472_10G_BASE_MASK ? 
OOM_DRIVER_PORT_TYPE_SFP_PLUS : OOM_DRIVER_PORT_TYPE_SFP; ++ break; ++ } ++ case DRIVER_TYPE_QSFP: ++ { ++ status = sfp_eeprom_read(client, SFF8024_PHYSICAL_DEVICE_ID_ADDR, &buf, sizeof(buf)); ++ if (unlikely(status < 0)) { ++ data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; ++ break; ++ } ++ ++ DEBUG_PRINT("qsfp port type (0x0) buf = (0x%x)", buf); ++ switch (buf) { ++ case SFF8024_DEVICE_ID_QSFP: ++ data->port_type = OOM_DRIVER_PORT_TYPE_QSFP; ++ break; ++ case SFF8024_DEVICE_ID_QSFP_PLUS: ++ data->port_type = OOM_DRIVER_PORT_TYPE_QSFP_PLUS; ++ break; ++ case SFF8024_DEVICE_ID_QSFP28: ++ data->port_type = OOM_DRIVER_PORT_TYPE_QSFP_PLUS; ++ break; ++ default: ++ data->port_type = OOM_DRIVER_PORT_TYPE_INVALID; ++ break; ++ } ++ ++ break; ++ } ++ default: ++ break; ++ } ++ ++ mutex_unlock(&data->update_lock); ++ return data; ++} ++ ++static ssize_t show_port_type(struct device *dev, struct device_attribute *da, ++ char *buf) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct sfp_port_data *data = i2c_get_clientdata(client); ++ int present = sfp_is_port_present(client, data->port); ++ ++ if (IS_ERR_VALUE(present)) { ++ return present; ++ } ++ ++ if (!present) { ++ return sprintf(buf, "%d\n", OOM_DRIVER_PORT_TYPE_NOT_PRESENT); ++ } ++ ++ sfp_update_port_type(dev); ++ return sprintf(buf, "%d\n", data->port_type); ++} ++ ++static struct sfp_port_data* qsfp_update_tx_rx_status(struct device *dev) ++{ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct sfp_port_data *data = i2c_get_clientdata(client); ++ int i, status = -1; ++ u8 buf = 0; ++ u8 reg[] = {SFF8436_TX_FAULT_ADDR, SFF8436_TX_DISABLE_ADDR, SFF8436_RX_LOS_ADDR}; ++ ++ if (time_before(jiffies, data->qsfp->last_updated + HZ + HZ / 2) && data->qsfp->valid) { ++ return data; ++ } ++ ++ DEBUG_PRINT("Starting sfp tx rx status update"); ++ mutex_lock(&data->update_lock); ++ data->qsfp->valid = 0; ++ memset(data->qsfp->status, 0, sizeof(data->qsfp->status)); ++ ++ /* Notify device to update tx fault/ tx disable/ rx los status */ ++ for (i = 0; i < ARRAY_SIZE(reg); i++) { ++ status = sfp_eeprom_read(client, reg[i], &buf, sizeof(buf)); ++ if (unlikely(status < 0)) { ++ data = ERR_PTR(status); ++ goto exit; ++ } ++ } ++ msleep(200); ++ ++ /* Read actual tx fault/ tx disable/ rx los status */ ++ for (i = 0; i < ARRAY_SIZE(reg); i++) { ++ status = sfp_eeprom_read(client, reg[i], &buf, sizeof(buf)); ++ if (unlikely(status < 0)) { ++ data = ERR_PTR(status); ++ goto exit; ++ } ++ ++ DEBUG_PRINT("qsfp reg(0x%x) status = (0x%x)", reg[i], data->qsfp->status[i]); ++ data->qsfp->status[i] = (buf & 0xF); ++ } ++ ++ data->qsfp->valid = 1; ++ data->qsfp->last_updated = jiffies; ++ ++exit: ++ mutex_unlock(&data->update_lock); ++ return data; ++} ++ ++static ssize_t qsfp_show_tx_rx_status(struct device *dev, struct device_attribute *da, ++ char *buf) ++{ ++ int status; ++ u8 val = 0; ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); ++ struct i2c_client *client = to_i2c_client(dev); ++ struct sfp_port_data *data = i2c_get_clientdata(client); ++ ++ status = sfp_is_port_present(client, data->port); ++ if (IS_ERR_VALUE(status)) { ++ return status; ++ } ++ ++ data = qsfp_update_tx_rx_status(dev); ++ if (IS_ERR(data)) { ++ return PTR_ERR(data); ++ } ++ ++ switch (attr->index) { ++ case TX_FAULT1: ++ case TX_FAULT2: ++ case TX_FAULT3: ++ case TX_FAULT4: ++ val = (data->qsfp->status[2] & BIT_INDEX(attr->index - TX_FAULT1)) ? 
1 : 0; ++ break; ++ case TX_DISABLE1: ++ case TX_DISABLE2: ++ case TX_DISABLE3: ++ case TX_DISABLE4: ++ val = (data->qsfp->status[1] & BIT_INDEX(attr->index - TX_DISABLE1)) ? 1 : 0; ++ break; ++ case RX_LOS1: ++ case RX_LOS2: ++ case RX_LOS3: ++ case RX_LOS4: ++ val = (data->qsfp->status[0] & BIT_INDEX(attr->index - RX_LOS1)) ? 1 : 0; ++ break; ++ default: ++ break; ++ } ++ ++ return sprintf(buf, "%d\n", val); ++} ++ ++static ssize_t qsfp_set_tx_disable(struct device *dev, struct device_attribute *da, ++ const char *buf, size_t count) ++{ ++ long disable; ++ int status; ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); ++ struct sfp_port_data *data = NULL; ++ ++ status = kstrtol(buf, 10, &disable); ++ if (status) { ++ return status; ++ } ++ ++ data = qsfp_update_tx_rx_status(dev); ++ if (IS_ERR(data)) { ++ return PTR_ERR(data); ++ } ++ ++ mutex_lock(&data->update_lock); ++ ++ if (disable) { ++ data->qsfp->status[1] |= (1 << (attr->index - TX_DISABLE1)); ++ } ++ else { ++ data->qsfp->status[1] &= ~(1 << (attr->index - TX_DISABLE1)); ++ } ++ ++ DEBUG_PRINT("index = (%d), status = (0x%x)", attr->index, data->qsfp->status[1]); ++ status = sfp_eeprom_write(data->client, SFF8436_TX_DISABLE_ADDR, &data->qsfp->status[1], sizeof(data->qsfp->status[1])); ++ if (unlikely(status < 0)) { ++ count = status; ++ } ++ ++ mutex_unlock(&data->update_lock); ++ return count; ++} ++ ++static ssize_t sfp_show_ddm_implemented(struct device *dev, struct device_attribute *da, ++ char *buf) ++{ ++ int status; ++ char ddm; ++ struct i2c_client *client = to_i2c_client(dev); ++ struct sfp_port_data *data = i2c_get_clientdata(client); ++ ++ status = sfp_is_port_present(client, data->port); ++ if (IS_ERR_VALUE(status)) { ++ return status; ++ } ++ ++ status = sfp_eeprom_read(client, SFF8472_DIAG_MON_TYPE_ADDR, &ddm, sizeof(ddm)); ++ if (unlikely(status < 0)) { ++ return status; ++ } ++ ++ return sprintf(buf, "%d\n", (ddm & SFF8472_DIAG_MON_TYPE_DDM_MASK) ? 1 : 0); ++} ++ ++static ssize_t sfp_show_tx_rx_status(struct device *dev, struct device_attribute *da, ++ char *buf) ++{ ++ u8 val = 0, index = 0; ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(da); ++ struct sfp_port_data *data = sfp_update_tx_rx_status(dev); ++ ++ if (IS_ERR(data)) { ++ return PTR_ERR(data); ++ } ++ ++ if(attr->index == RX_LOS_ALL) { ++ int i = 0; ++ u8 values[6] = {0}; ++ ++ for (i = 0; i < ARRAY_SIZE(values); i++) { ++ values[i] = (u8)(data->msa->status[2] >> (i * 8)); ++ } ++ ++ /** Return values 1 -> 48 in order */ ++ return sprintf(buf, "%.2x %.2x %.2x %.2x %.2x %.2x\n", ++ values[0], values[1], values[2], ++ values[3], values[4], values[5]); ++ } ++ ++ switch (attr->index) { ++ case TX_FAULT: ++ index = 0; ++ break; ++ case TX_DISABLE: ++ index = 1; ++ break; ++ case RX_LOS: ++ index = 2; ++ break; ++ default: ++ break; ++ } ++ ++ val = (data->msa->status[index] & BIT_INDEX(data->port)) ? 
1 : 0; ++ return sprintf(buf, "%d\n", val); ++} ++ ++/* SFP/QSFP common attributes for sysfs */ ++static SENSOR_DEVICE_ATTR(sfp_port_number, S_IRUGO, show_port_number, NULL, PORT_NUMBER); ++static SENSOR_DEVICE_ATTR(sfp_port_type, S_IRUGO, show_port_type, NULL, PORT_TYPE); ++static SENSOR_DEVICE_ATTR(sfp_is_present, S_IRUGO, show_present, NULL, PRESENT); ++static SENSOR_DEVICE_ATTR(sfp_is_present_all, S_IRUGO, show_present, NULL, PRESENT_ALL); ++ ++/* QSFP attributes for sysfs */ ++static SENSOR_DEVICE_ATTR(sfp_rx_los1, S_IRUGO, qsfp_show_tx_rx_status, NULL, RX_LOS1); ++static SENSOR_DEVICE_ATTR(sfp_rx_los2, S_IRUGO, qsfp_show_tx_rx_status, NULL, RX_LOS2); ++static SENSOR_DEVICE_ATTR(sfp_rx_los3, S_IRUGO, qsfp_show_tx_rx_status, NULL, RX_LOS3); ++static SENSOR_DEVICE_ATTR(sfp_rx_los4, S_IRUGO, qsfp_show_tx_rx_status, NULL, RX_LOS4); ++static SENSOR_DEVICE_ATTR(sfp_tx_disable1, S_IWUSR | S_IRUGO, qsfp_show_tx_rx_status, qsfp_set_tx_disable, TX_DISABLE1); ++static SENSOR_DEVICE_ATTR(sfp_tx_disable2, S_IWUSR | S_IRUGO, qsfp_show_tx_rx_status, qsfp_set_tx_disable, TX_DISABLE2); ++static SENSOR_DEVICE_ATTR(sfp_tx_disable3, S_IWUSR | S_IRUGO, qsfp_show_tx_rx_status, qsfp_set_tx_disable, TX_DISABLE3); ++static SENSOR_DEVICE_ATTR(sfp_tx_disable4, S_IWUSR | S_IRUGO, qsfp_show_tx_rx_status, qsfp_set_tx_disable, TX_DISABLE4); ++static SENSOR_DEVICE_ATTR(sfp_tx_fault1, S_IRUGO, qsfp_show_tx_rx_status, NULL, TX_FAULT1); ++static SENSOR_DEVICE_ATTR(sfp_tx_fault2, S_IRUGO, qsfp_show_tx_rx_status, NULL, TX_FAULT2); ++static SENSOR_DEVICE_ATTR(sfp_tx_fault3, S_IRUGO, qsfp_show_tx_rx_status, NULL, TX_FAULT3); ++static SENSOR_DEVICE_ATTR(sfp_tx_fault4, S_IRUGO, qsfp_show_tx_rx_status, NULL, TX_FAULT4); ++static struct attribute *qsfp_attributes[] = { ++ &sensor_dev_attr_sfp_port_number.dev_attr.attr, ++ &sensor_dev_attr_sfp_port_type.dev_attr.attr, ++ &sensor_dev_attr_sfp_is_present.dev_attr.attr, ++ &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, ++ &sensor_dev_attr_sfp_rx_los1.dev_attr.attr, ++ &sensor_dev_attr_sfp_rx_los2.dev_attr.attr, ++ &sensor_dev_attr_sfp_rx_los3.dev_attr.attr, ++ &sensor_dev_attr_sfp_rx_los4.dev_attr.attr, ++ &sensor_dev_attr_sfp_tx_disable1.dev_attr.attr, ++ &sensor_dev_attr_sfp_tx_disable2.dev_attr.attr, ++ &sensor_dev_attr_sfp_tx_disable3.dev_attr.attr, ++ &sensor_dev_attr_sfp_tx_disable4.dev_attr.attr, ++ &sensor_dev_attr_sfp_tx_fault1.dev_attr.attr, ++ &sensor_dev_attr_sfp_tx_fault2.dev_attr.attr, ++ &sensor_dev_attr_sfp_tx_fault3.dev_attr.attr, ++ &sensor_dev_attr_sfp_tx_fault4.dev_attr.attr, ++ NULL ++}; ++ ++/* SFP msa attributes for sysfs */ ++static SENSOR_DEVICE_ATTR(sfp_ddm_implemented, S_IRUGO, sfp_show_ddm_implemented, NULL, DDM_IMPLEMENTED); ++static SENSOR_DEVICE_ATTR(sfp_rx_los, S_IRUGO, sfp_show_tx_rx_status, NULL, RX_LOS); ++static SENSOR_DEVICE_ATTR(sfp_rx_los_all, S_IRUGO, sfp_show_tx_rx_status, NULL, RX_LOS_ALL); ++static SENSOR_DEVICE_ATTR(sfp_tx_disable, S_IWUSR | S_IRUGO, sfp_show_tx_rx_status, sfp_set_tx_disable, TX_DISABLE); ++static SENSOR_DEVICE_ATTR(sfp_tx_fault, S_IRUGO, sfp_show_tx_rx_status, NULL, TX_FAULT); ++static struct attribute *sfp_msa_attributes[] = { ++ &sensor_dev_attr_sfp_port_number.dev_attr.attr, ++ &sensor_dev_attr_sfp_port_type.dev_attr.attr, ++ &sensor_dev_attr_sfp_is_present.dev_attr.attr, ++ &sensor_dev_attr_sfp_is_present_all.dev_attr.attr, ++ &sensor_dev_attr_sfp_ddm_implemented.dev_attr.attr, ++ &sensor_dev_attr_sfp_tx_fault.dev_attr.attr, ++ &sensor_dev_attr_sfp_rx_los.dev_attr.attr, ++ 
&sensor_dev_attr_sfp_rx_los_all.dev_attr.attr,
++ &sensor_dev_attr_sfp_tx_disable.dev_attr.attr,
++ NULL
++};
++
++/* SFP ddm attributes for sysfs */
++static struct attribute *sfp_ddm_attributes[] = {
++ NULL
++};
++
++static ssize_t sfp_eeprom_write(struct i2c_client *client, u8 command, const char *data,
++ int data_len)
++{
++#if USE_I2C_BLOCK_READ
++ int status, retry = I2C_RW_RETRY_COUNT;
++
++ if (data_len > I2C_SMBUS_BLOCK_MAX) {
++ data_len = I2C_SMBUS_BLOCK_MAX;
++ }
++
++ while (retry) {
++ status = i2c_smbus_write_i2c_block_data(client, command, data_len, data);
++ if (unlikely(status < 0)) {
++ msleep(I2C_RW_RETRY_INTERVAL);
++ retry--;
++ continue;
++ }
++
++ break;
++ }
++
++ if (unlikely(status < 0)) {
++ return status;
++ }
++
++ return data_len;
++#else
++ int status, retry = I2C_RW_RETRY_COUNT;
++
++ while (retry) {
++ status = i2c_smbus_write_byte_data(client, command, *data);
++ if (unlikely(status < 0)) {
++ msleep(I2C_RW_RETRY_INTERVAL);
++ retry--;
++ continue;
++ }
++
++ break;
++ }
++
++ if (unlikely(status < 0)) {
++ return status;
++ }
++
++ return 1;
++#endif
++
++
++}
++
++static ssize_t sfp_port_write(struct sfp_port_data *data,
++ const char *buf, loff_t off, size_t count)
++{
++ ssize_t retval = 0;
++
++ if (unlikely(!count)) {
++ return count;
++ }
++
++ /*
++ * Write data to chip, protecting against concurrent updates
++ * from this host, but not from other I2C masters.
++ */
++ mutex_lock(&data->update_lock);
++
++ while (count) {
++ ssize_t status;
++
++ status = sfp_eeprom_write(data->client, off, buf, count);
++ if (status <= 0) {
++ if (retval == 0) {
++ retval = status;
++ }
++ break;
++ }
++ buf += status;
++ off += status;
++ count -= status;
++ retval += status;
++ }
++
++ mutex_unlock(&data->update_lock);
++ return retval;
++}
++
++
++static ssize_t sfp_bin_write(struct file *filp, struct kobject *kobj,
++ struct bin_attribute *attr,
++ char *buf, loff_t off, size_t count)
++{
++ struct sfp_port_data *data;
++ DEBUG_PRINT("offset = (%d), count = (%d)", off, count);
++ data = dev_get_drvdata(container_of(kobj, struct device, kobj));
++ return sfp_port_write(data, buf, off, count);
++}
++
++static ssize_t sfp_eeprom_read(struct i2c_client *client, u8 command, u8 *data,
++ int data_len)
++{
++#if USE_I2C_BLOCK_READ
++ int status, retry = I2C_RW_RETRY_COUNT;
++
++ if (data_len > I2C_SMBUS_BLOCK_MAX) {
++ data_len = I2C_SMBUS_BLOCK_MAX;
++ }
++
++ while (retry) {
++ status = i2c_smbus_read_i2c_block_data(client, command, data_len, data);
++ if (unlikely(status < 0)) {
++ msleep(I2C_RW_RETRY_INTERVAL);
++ retry--;
++ continue;
++ }
++
++ break;
++ }
++
++ if (unlikely(status < 0)) {
++ goto abort;
++ }
++ if (unlikely(status != data_len)) {
++ status = -EIO;
++ goto abort;
++ }
++
++abort:
++ return status;
++#else
++ int status, retry = I2C_RW_RETRY_COUNT;
++
++ while (retry) {
++ status = i2c_smbus_read_byte_data(client, command);
++ if (unlikely(status < 0)) {
++ msleep(I2C_RW_RETRY_INTERVAL);
++ retry--;
++ continue;
++ }
++
++ break;
++ }
++
++ if (unlikely(status < 0)) {
++ dev_dbg(&client->dev, "sfp read byte data failed, command(0x%2x), err(%d)\r\n", command, status);
++ goto abort;
++ }
++
++ *data = (u8)status;
++ status = 1;
++
++abort:
++ return status;
++#endif
++}
++
++static ssize_t sfp_port_read(struct sfp_port_data *data,
++ char *buf, loff_t off, size_t count)
++{
++ ssize_t retval = 0;
++
++ if (unlikely(!count)) {
++ DEBUG_PRINT("Count = 0, return");
++ return count;
++ }
++
++ /*
++ * Read
data from chip, protecting against concurrent updates ++ * from this host, but not from other I2C masters. ++ */ ++ mutex_lock(&data->update_lock); ++ ++ while (count) { ++ ssize_t status; ++ ++ status = sfp_eeprom_read(data->client, off, buf, count); ++ if (status <= 0) { ++ if (retval == 0) { ++ retval = status; ++ } ++ break; ++ } ++ ++ buf += status; ++ off += status; ++ count -= status; ++ retval += status; ++ } ++ ++ mutex_unlock(&data->update_lock); ++ return retval; ++ ++} ++ ++static ssize_t sfp_bin_read(struct file *filp, struct kobject *kobj, ++ struct bin_attribute *attr, ++ char *buf, loff_t off, size_t count) ++{ ++ struct sfp_port_data *data; ++ DEBUG_PRINT("offset = (%d), count = (%d)", off, count); ++ data = dev_get_drvdata(container_of(kobj, struct device, kobj)); ++ return sfp_port_read(data, buf, off, count); ++} ++ ++static int sfp_sysfs_eeprom_init(struct kobject *kobj, struct bin_attribute *eeprom) ++{ ++ int err; ++ ++ sysfs_bin_attr_init(eeprom); ++ eeprom->attr.name = EEPROM_NAME; ++ eeprom->attr.mode = S_IWUSR | S_IRUGO; ++ eeprom->read = sfp_bin_read; ++ eeprom->write = sfp_bin_write; ++ eeprom->size = EEPROM_SIZE; ++ ++ /* Create eeprom file */ ++ err = sysfs_create_bin_file(kobj, eeprom); ++ if (err) { ++ return err; ++ } ++ ++ return 0; ++} ++ ++static int sfp_sysfs_eeprom_cleanup(struct kobject *kobj, struct bin_attribute *eeprom) ++{ ++ sysfs_remove_bin_file(kobj, eeprom); ++ return 0; ++} ++ ++static const struct attribute_group sfp_msa_group = { ++ .attrs = sfp_msa_attributes, ++}; ++ ++static int sfp_i2c_check_functionality(struct i2c_client *client) ++{ ++#if USE_I2C_BLOCK_READ ++ return i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK); ++#else ++ return i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA); ++#endif ++} ++ ++static int sfp_msa_probe(struct i2c_client *client, const struct i2c_device_id *dev_id, ++ struct sfp_msa_data **data) ++{ ++ int status; ++ struct sfp_msa_data *msa; ++ ++ if (!sfp_i2c_check_functionality(client)) { ++ status = -EIO; ++ goto exit; ++ } ++ ++ msa = kzalloc(sizeof(struct sfp_msa_data), GFP_KERNEL); ++ if (!msa) { ++ status = -ENOMEM; ++ goto exit; ++ } ++ ++ /* Register sysfs hooks */ ++ status = sysfs_create_group(&client->dev.kobj, &sfp_msa_group); ++ if (status) { ++ goto exit_free; ++ } ++ ++ /* init eeprom */ ++ status = sfp_sysfs_eeprom_init(&client->dev.kobj, &msa->eeprom.bin); ++ if (status) { ++ goto exit_remove; ++ } ++ ++ *data = msa; ++ dev_info(&client->dev, "sfp msa '%s'\n", client->name); ++ ++ return 0; ++ ++exit_remove: ++ sysfs_remove_group(&client->dev.kobj, &sfp_msa_group); ++exit_free: ++ kfree(msa); ++exit: ++ ++ return status; ++} ++ ++static const struct attribute_group sfp_ddm_group = { ++ .attrs = sfp_ddm_attributes, ++}; ++ ++static int sfp_ddm_probe(struct i2c_client *client, const struct i2c_device_id *dev_id, ++ struct sfp_ddm_data **data) ++{ ++ int status; ++ struct sfp_ddm_data *ddm; ++ ++ if (!sfp_i2c_check_functionality(client)) { ++ status = -EIO; ++ goto exit; ++ } ++ ++ ddm = kzalloc(sizeof(struct sfp_ddm_data), GFP_KERNEL); ++ if (!ddm) { ++ status = -ENOMEM; ++ goto exit; ++ } ++ ++ /* Register sysfs hooks */ ++ status = sysfs_create_group(&client->dev.kobj, &sfp_ddm_group); ++ if (status) { ++ goto exit_free; ++ } ++ ++ /* init eeprom */ ++ status = sfp_sysfs_eeprom_init(&client->dev.kobj, &ddm->eeprom.bin); ++ if (status) { ++ goto exit_remove; ++ } ++ ++ *data = ddm; ++ dev_info(&client->dev, "sfp ddm '%s'\n", client->name); ++ ++ return 0; ++ 
++exit_remove: ++ sysfs_remove_group(&client->dev.kobj, &sfp_ddm_group); ++exit_free: ++ kfree(ddm); ++exit: ++ ++ return status; ++} ++ ++static const struct attribute_group qsfp_group = { ++ .attrs = qsfp_attributes, ++}; ++ ++static int qsfp_probe(struct i2c_client *client, const struct i2c_device_id *dev_id, ++ struct qsfp_data **data) ++{ ++ int status; ++ struct qsfp_data *qsfp; ++ ++ if (!sfp_i2c_check_functionality(client)) { ++ status = -EIO; ++ goto exit; ++ } ++ ++ qsfp = kzalloc(sizeof(struct qsfp_data), GFP_KERNEL); ++ if (!qsfp) { ++ status = -ENOMEM; ++ goto exit; ++ } ++ ++ /* Register sysfs hooks */ ++ status = sysfs_create_group(&client->dev.kobj, &qsfp_group); ++ if (status) { ++ goto exit_free; ++ } ++ ++ /* init eeprom */ ++ status = sfp_sysfs_eeprom_init(&client->dev.kobj, &qsfp->eeprom.bin); ++ if (status) { ++ goto exit_remove; ++ } ++ ++ /* Bring QSFPs out of reset */ ++ accton_i2c_cpld_write(0x62, 0x15, 0x3F); ++ ++ *data = qsfp; ++ dev_info(&client->dev, "qsfp '%s'\n", client->name); ++ ++ return 0; ++ ++exit_remove: ++ sysfs_remove_group(&client->dev.kobj, &qsfp_group); ++exit_free: ++ kfree(qsfp); ++exit: ++ ++ return status; ++} ++ ++static int sfp_device_probe(struct i2c_client *client, ++ const struct i2c_device_id *dev_id) ++{ ++ struct sfp_port_data *data = NULL; ++ ++ data = kzalloc(sizeof(struct sfp_port_data), GFP_KERNEL); ++ if (!data) { ++ return -ENOMEM; ++ } ++ ++ i2c_set_clientdata(client, data); ++ mutex_init(&data->update_lock); ++ data->port = dev_id->driver_data; ++ data->client = client; ++ ++ if (dev_id->driver_data >= sfp1 && dev_id->driver_data <= sfp48) { ++ if (client->addr == SFP_EEPROM_A0_I2C_ADDR) { ++ data->driver_type = DRIVER_TYPE_SFP_MSA; ++ return sfp_msa_probe(client, dev_id, &data->msa); ++ } ++ else if (client->addr == SFP_EEPROM_A2_I2C_ADDR) { ++ data->driver_type = DRIVER_TYPE_SFP_DDM; ++ return sfp_ddm_probe(client, dev_id, &data->ddm); ++ } ++ } ++ else { /* sfp49 ~ sfp54 */ ++ if (client->addr == SFP_EEPROM_A0_I2C_ADDR) { ++ data->driver_type = DRIVER_TYPE_QSFP; ++ return qsfp_probe(client, dev_id, &data->qsfp); ++ } ++ } ++ ++ return -ENODEV; ++} ++ ++static int sfp_msa_remove(struct i2c_client *client, struct sfp_msa_data *data) ++{ ++ sfp_sysfs_eeprom_cleanup(&client->dev.kobj, &data->eeprom.bin); ++ sysfs_remove_group(&client->dev.kobj, &sfp_msa_group); ++ kfree(data); ++ return 0; ++} ++ ++static int sfp_ddm_remove(struct i2c_client *client, struct sfp_ddm_data *data) ++{ ++ sfp_sysfs_eeprom_cleanup(&client->dev.kobj, &data->eeprom.bin); ++ sysfs_remove_group(&client->dev.kobj, &sfp_ddm_group); ++ kfree(data); ++ return 0; ++} ++ ++static int qfp_remove(struct i2c_client *client, struct qsfp_data *data) ++{ ++ sfp_sysfs_eeprom_cleanup(&client->dev.kobj, &data->eeprom.bin); ++ sysfs_remove_group(&client->dev.kobj, &qsfp_group); ++ kfree(data); ++ return 0; ++} ++ ++static int sfp_device_remove(struct i2c_client *client) ++{ ++ struct sfp_port_data *data = i2c_get_clientdata(client); ++ ++ switch (data->driver_type) { ++ case DRIVER_TYPE_SFP_MSA: ++ return sfp_msa_remove(client, data->msa); ++ case DRIVER_TYPE_SFP_DDM: ++ return sfp_ddm_remove(client, data->ddm); ++ case DRIVER_TYPE_QSFP: ++ return qfp_remove(client, data->qsfp); ++ } ++ ++ return 0; ++} ++ ++static struct i2c_driver sfp_driver = { ++ .driver = { ++ .name = DRIVER_NAME, ++ }, ++ .probe = sfp_device_probe, ++ .remove = sfp_device_remove, ++ .id_table = sfp_device_id, ++ .address_list = normal_i2c, ++}; ++ ++static int __init sfp_init(void) ++{ ++ extern 
int platform_accton_as5512_54x(void); ++ if(!platform_accton_as5512_54x()) { ++ return -ENODEV; ++ } ++ ++ return i2c_add_driver(&sfp_driver); ++} ++ ++static void __exit sfp_exit(void) ++{ ++ i2c_del_driver(&sfp_driver); ++} ++ ++MODULE_AUTHOR("Brandon Chuang "); ++MODULE_DESCRIPTION("accton as5512_54x_sfp driver"); ++MODULE_LICENSE("GPL"); ++ ++module_init(sfp_init); ++module_exit(sfp_exit); ++ ++ diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5712_54x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5712_54x-device-drivers.patch index fc610c5f..67bacd03 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5712_54x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5712_54x-device-drivers.patch @@ -869,7 +869,7 @@ index 0000000..6381db5 + { + .ident = "Accton AS5712", + .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5712"), + }, + } diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54t-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54t-device-drivers.patch index dfe66f46..5eedbb50 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54t-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54t-device-drivers.patch @@ -846,7 +846,7 @@ index 3aeb08d..acf88c9 100644 + { + .ident = "Accton AS5812 54t", + .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5812-54T"), + }, + } diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54x-device-drivers.patch index 512715df..92c6d201 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as5812_54x-device-drivers.patch @@ -868,7 +868,7 @@ index 0000000..e01e557 + { + .ident = "Accton AS5812-54X", + .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS5812-54X"), + }, + } diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6712_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6712_32x-device-drivers.patch index 4b1220f0..05377b0d 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6712_32x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6712_32x-device-drivers.patch @@ -878,7 +878,7 @@ index 0000000..2ec0a59 + { + .ident = "Accton AS6712", + .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS6712"), + }, + } diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6812_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6812_32x-device-drivers.patch index 3501563d..b71ad20a 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6812_32x-device-drivers.patch +++ 
b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as6812_32x-device-drivers.patch @@ -871,7 +871,7 @@ index 0000000..d668ca4 + { + .ident = "Accton AS6812", + .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS6812"), + }, + } diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7512_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7512_32x-device-drivers.patch index 3feb3823..fbecedd4 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7512_32x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7512_32x-device-drivers.patch @@ -1080,7 +1080,7 @@ index 0000000..96e3490 + { + .ident = "Accton AS7512", + .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS7512"), + }, + } diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch index 76aae312..2f48944c 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7712_32x-device-drivers.patch @@ -811,7 +811,7 @@ index 96e3490..3aeb08d 100644 + { + .ident = "Accton AS7712", + .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), ++ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), + DMI_MATCH(DMI_PRODUCT_NAME, "AS7712"), + }, + } diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch index 73ea06b2..51fca17e 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/platform-accton-as7716_32x-device-drivers.patch @@ -1,15 +1,13 @@ Device driver patches for accton as7716-32x (fan/psu/cpld/led/sfp) diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig -index 89c619d..42abae5 100644 +index 968bd5f..bc10314 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig -@@ -1573,7 +1573,25 @@ config SENSORS_ACCTON_AS5812_54t_PSU - +@@ -1592,6 +1592,24 @@ config SENSORS_ACCTON_AS5512_54X_FAN This driver can also be built as a module. If so, the module will - be called accton_as5812_54t_psu. -- -+ + be called accton_as5512_54x_fan. + +config SENSORS_ACCTON_AS7716_32x_FAN + tristate "Accton as7716 32x fan" + depends on I2C && SENSORS_ACCTON_I2C_CPLD @@ -20,25 +18,25 @@ index 89c619d..42abae5 100644 + be called accton_as7716_32x_fan. + +config SENSORS_ACCTON_AS7716_32x_PSU -+ tristate "Accton as7716 32x psu" -+ depends on I2C && SENSORS_ACCTON_I2C_CPLD -+ help -+ If you say yes here you get support for Accton as7716 32x psu. ++ tristate "Accton as7716 32x psu" ++ depends on I2C && SENSORS_ACCTON_I2C_CPLD ++ help ++ If you say yes here you get support for Accton as7716 32x psu. ++ ++ This driver can also be built as a module. If so, the module will ++ be called accton_as7716_32x_psu. + -+ This driver can also be built as a module. If so, the module will -+ be called accton_as7716_32x_psu. 
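Every accton_i2c_cpld.c hunk in this series makes the same one-line substitution: platform detection now matches on DMI_BOARD_VENDOR rather than DMI_SYS_VENDOR, presumably because on these switches the "Accton" string is programmed into the SMBIOS baseboard (type 2) record while the system (type 1) record carries a different value. Both strings are exported through sysfs, so the difference is easy to inspect from userspace; a small sketch using the kernel's standard DMI id attributes:

    #include <stdio.h>
    #include <string.h>

    /* Print one DMI string exported by the kernel, e.g. "Accton". */
    static void show(const char *path)
    {
            char buf[128] = "";
            FILE *f = fopen(path, "r");

            if (f) {
                    if (fgets(buf, sizeof(buf), f))
                            buf[strcspn(buf, "\n")] = '\0';
                    fclose(f);
            }
            printf("%s: %s\n", path, buf);
    }

    int main(void)
    {
            /* DMI_SYS_VENDOR matches read the SMBIOS type 1 record */
            show("/sys/class/dmi/id/sys_vendor");
            /* DMI_BOARD_VENDOR matches read the type 2 (baseboard) record */
            show("/sys/class/dmi/id/board_vendor");
            return 0;
    }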
-+ if ACPI comment "ACPI drivers" diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile -index de922bc..9210ab0 100644 +index b8ee7b0..851d90a 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile -@@ -36,6 +36,8 @@ obj-$(CONFIG_SENSORS_ACCTON_AS6812_32x_FAN) += accton_as6812_32x_fan.o - obj-$(CONFIG_SENSORS_ACCTON_AS6812_32x_PSU) += accton_as6812_32x_psu.o - obj-$(CONFIG_SENSORS_ACCTON_AS5812_54t_FAN) += accton_as5812_54t_fan.o +@@ -38,6 +38,8 @@ obj-$(CONFIG_SENSORS_ACCTON_AS5812_54t_FAN) += accton_as5812_54t_fan.o obj-$(CONFIG_SENSORS_ACCTON_AS5812_54t_PSU) += accton_as5812_54t_psu.o + obj-$(CONFIG_SENSORS_ACCTON_AS5512_54X_PSU) += accton_as5512_54x_psu.o + obj-$(CONFIG_SENSORS_ACCTON_AS5512_54X_FAN) += accton_as5512_54x_fan.o +obj-$(CONFIG_SENSORS_ACCTON_AS7716_32x_FAN) += accton_as7716_32x_fan.o +obj-$(CONFIG_SENSORS_ACCTON_AS7716_32x_PSU) += accton_as7716_32x_psu.o obj-$(CONFIG_SENSORS_AD7314) += ad7314.o @@ -802,26 +800,26 @@ index 0000000..4fd15ae +MODULE_LICENSE("GPL"); + diff --git a/drivers/hwmon/accton_i2c_cpld.c b/drivers/hwmon/accton_i2c_cpld.c -index acf88c9..95202ec 100644 +index e50c599..89e3a0e 100644 --- a/drivers/hwmon/accton_i2c_cpld.c +++ b/drivers/hwmon/accton_i2c_cpld.c -@@ -255,6 +255,22 @@ int platform_accton_as5812_54t(void) +@@ -271,6 +271,22 @@ int platform_accton_as5512_54x(void) } - EXPORT_SYMBOL(platform_accton_as5812_54t); + EXPORT_SYMBOL(platform_accton_as5512_54x); +static struct dmi_system_id as7716_dmi_table[] = { -+ { -+ .ident = "Accton AS7716", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Accton"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "AS7716"), -+ }, -+ } ++ { ++ .ident = "Accton AS7716", ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "Accton"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "AS7716"), ++ }, ++ } +}; + +int platform_accton_as7716_32x(void) +{ -+ return dmi_check_system(as7716_dmi_table); ++ return dmi_check_system(as7716_dmi_table); +} +EXPORT_SYMBOL(platform_accton_as7716_32x); + @@ -829,38 +827,35 @@ index acf88c9..95202ec 100644 MODULE_DESCRIPTION("accton_i2c_cpld driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig -index 599b97b..bdfb18e 100644 +index 9ba4a1b..e29de21 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig -@@ -88,7 +88,14 @@ config LEDS_ACCTON_AS5812_54t - help - This option enables support for the LEDs on the Accton as5812 54t. - Say Y to enable LEDs on the Accton as5812 54t. -- -+ +@@ -96,6 +96,13 @@ config LEDS_ACCTON_AS5512_54X + This option enables support for the LEDs on the Accton as5512 54x. + Say Y to enable LEDs on the Accton as5512 54x. + +config LEDS_ACCTON_AS7716_32x + tristate "LED support for the Accton as7716 32x" + depends on LEDS_CLASS && SENSORS_ACCTON_I2C_CPLD + help + This option enables support for the LEDs on the Accton as7716 32x. + Say Y to enable LEDs on the Accton as7716 32x. 
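As the accton_i2c_cpld.c hunk above shows, each platform helper is a one-entry DMI table wrapped in dmi_check_system(), and every driver in the series calls its helper at the top of module init so that loading on the wrong box fails cleanly with -ENODEV before any CPLD traffic happens. A stripped-down, compilable sketch of the pattern follows; the vendor and product strings are illustrative, and the empty terminator entry is deliberate, since dmi_check_system() walks the table until it finds one:

    #include <linux/module.h>
    #include <linux/dmi.h>

    /* One-entry DMI table: all .matches in an entry must hit for the
     * entry to count as a match. */
    static struct dmi_system_id demo_dmi_table[] = {
            {
                    .ident = "Demo platform",
                    .matches = {
                            DMI_MATCH(DMI_BOARD_VENDOR, "Accton"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "AS7716"),
                    },
            },
            { } /* terminator: dmi_check_system() stops at .ident == NULL */
    };

    static int __init demo_init(void)
    {
            /* dmi_check_system() returns the number of matching entries */
            if (!dmi_check_system(demo_dmi_table))
                    return -ENODEV; /* wrong hardware: refuse to load */

            pr_info("demo: running on a matching platform\n");
            return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");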
-+ ++ config LEDS_LM3530 tristate "LCD Backlight driver for LM3530" depends on LEDS_CLASS diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile -index bd20baa..58b1a80 100644 +index ff3be6c..42f274a 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile -@@ -50,7 +50,7 @@ obj-$(CONFIG_LEDS_ACCTON_AS7712_32x) += leds-accton_as7712_32x.o - obj-$(CONFIG_LEDS_ACCTON_AS5812_54x) += leds-accton_as5812_54x.o +@@ -51,6 +51,7 @@ obj-$(CONFIG_LEDS_ACCTON_AS5812_54x) += leds-accton_as5812_54x.o obj-$(CONFIG_LEDS_ACCTON_AS6812_32x) += leds-accton_as6812_32x.o obj-$(CONFIG_LEDS_ACCTON_AS5812_54t) += leds-accton_as5812_54t.o -- + obj-$(CONFIG_LEDS_ACCTON_AS5512_54X) += leds-accton_as5512_54x.o +obj-$(CONFIG_LEDS_ACCTON_AS7716_32x) += leds-accton_as7716_32x.o + # LED SPI Drivers obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o - diff --git a/drivers/leds/leds-accton_as7716_32x.c b/drivers/leds/leds-accton_as7716_32x.c new file mode 100644 index 0000000..5a84897 @@ -1311,15 +1306,13 @@ index 0000000..5a84897 +MODULE_DESCRIPTION("accton_as7716_32x_led driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig -index c75227b..3ef5125 100644 +index 70a3f59..97f811f 100644 --- a/drivers/misc/eeprom/Kconfig +++ b/drivers/misc/eeprom/Kconfig -@@ -135,7 +135,16 @@ config EEPROM_ACCTON_AS5812_54t_SFP +@@ -145,6 +145,15 @@ config EEPROM_ACCTON_AS5512_54X_SFP + This driver can also be built as a module. If so, the module will + be called accton_5512_54x_sfp. - This driver can also be built as a module. If so, the module will - be called accton_as5812_54t_sfp. -- -+ +config EEPROM_ACCTON_AS7716_32x_SFP + tristate "Accton as7716 32x sfp" + depends on I2C && SENSORS_ACCTON_I2C_CPLD @@ -1328,18 +1321,18 @@ index c75227b..3ef5125 100644 + + This driver can also be built as a module. If so, the module will + be called accton_as7716_32x_sfp. 
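All of the fan, psu, led and sfp modules added by these patches reach the hardware through accton_i2c_cpld_read()/accton_i2c_cpld_write(), two symbols exported by the shared accton_i2c_cpld module; that is what the recurring "depends on SENSORS_ACCTON_I2C_CPLD" lines in Kconfig express at build time. A sketch of the provider side of that split, with a hypothetical demo_cpld_read() standing in for the real accessor:

    #include <linux/module.h>
    #include <linux/types.h>

    /*
     * Provider side (the role accton_i2c_cpld.c plays): implement the
     * accessor once and export it. The real version issues an SMBus
     * byte read against the CPLD's I2C client at cpld_addr.
     */
    int demo_cpld_read(unsigned short cpld_addr, u8 reg)
    {
            return 0; /* placeholder result */
    }
    EXPORT_SYMBOL(demo_cpld_read);

    /*
     * Consumer side: the dependent drivers carry only an extern
     * declaration, exactly like the
     *     extern int accton_i2c_cpld_read(unsigned short cpld_addr, u8 reg);
     * lines at the top of each driver in this series; the module loader
     * resolves the symbol when the consumer is inserted after the
     * provider.
     */

    MODULE_LICENSE("GPL");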
-+ ++ config EEPROM_93CX6 tristate "EEPROM 93CX6 support" help diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile -index 152a8bc..dd47dd2 100644 +index 907f836..b59d70d 100644 --- a/drivers/misc/eeprom/Makefile +++ b/drivers/misc/eeprom/Makefile -@@ -13,4 +13,5 @@ obj-$(CONFIG_EEPROM_ACCTON_AS7712_32x_SFP) += accton_as7712_32x_sfp.o - obj-$(CONFIG_EEPROM_ACCTON_AS5812_54x_SFP) += accton_as5812_54x_sfp.o +@@ -14,4 +14,5 @@ obj-$(CONFIG_EEPROM_ACCTON_AS5812_54x_SFP) += accton_as5812_54x_sfp.o obj-$(CONFIG_EEPROM_ACCTON_AS6812_32x_SFP) += accton_as6812_32x_sfp.o obj-$(CONFIG_EEPROM_ACCTON_AS5812_54t_SFP) += accton_as5812_54t_sfp.o + obj-$(CONFIG_EEPROM_ACCTON_AS5512_54X_SFP) += accton_as5512_54x_sfp.o +obj-$(CONFIG_EEPROM_ACCTON_AS7716_32x_SFP) += accton_as7716_32x_sfp.o obj-$(CONFIG_EEPROM_SFF_8436) += sff_8436_eeprom.o diff --git a/drivers/misc/eeprom/accton_as7716_32x_sfp.c b/drivers/misc/eeprom/accton_as7716_32x_sfp.c diff --git a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series index 3b224736..20fa12e4 100644 --- a/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series +++ b/packages/base/any/kernels/3.2.65-1+deb7u2/patches/series @@ -246,6 +246,7 @@ platform-accton-as6812_32x-device-drivers.patch platform-accton-as5812_54t-device-drivers.patch driver-mfd-lpc-ich.patch driver-watchdog-itco-wd.patch +platform-accton-as5512_54x-device-drivers.patch platform-accton-as7716_32x-device-drivers.patch driver-broadcom-tigon3.patch mgmt-port-init-config.patch From 3d8a473b65a476f576c5b639dc60a69c1031d01d Mon Sep 17 00:00:00 2001 From: Lewis Kang Date: Thu, 12 May 2016 18:20:31 +0800 Subject: [PATCH 4/6] support installing NOS to where ONIE image resides while block device is not specified this fixes the issue when an external USB disk is inserted before powering on the switch that may change the device name of the expected installation destination (e.g. 
From 3d8a473b65a476f576c5b639dc60a69c1031d01d Mon Sep 17 00:00:00 2001
From: Lewis Kang
Date: Thu, 12 May 2016 18:20:31 +0800
Subject: [PATCH 4/6] support installing NOS to where ONIE image resides while block device is not specified

This fixes the issue where an external USB disk inserted before powering on
the switch can change the device name of the expected installation
destination (e.g. /dev/sdb becomes /dev/sdc).
---
 builds/amd64/installer/legacy/builds/amd64-installer.sh | 9 +++++++++
 .../r0/src/lib/install/x86-64-accton-as7712-32x-r0.sh   | 2 +-
 .../r0/src/lib/install/x86-64-accton-as7716-32x-r0.sh   | 2 +-
 3 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/builds/amd64/installer/legacy/builds/amd64-installer.sh b/builds/amd64/installer/legacy/builds/amd64-installer.sh
index 86accc8f..6a92de78 100644
--- a/builds/amd64/installer/legacy/builds/amd64-installer.sh
+++ b/builds/amd64/installer/legacy/builds/amd64-installer.sh
@@ -275,6 +275,15 @@ partition_gpt()
 installer_standard_gpt_install()
 {
     DEV=$1; shift
+
+    if [ -z $DEV ]; then
+        # Install on the same block device as ONIE
+        DEV=$(blkid | grep ONIE-BOOT | awk '{print $1}' | sed -e 's/[1-9][0-9]*:.*$//' | sed -e 's/\([0-9]\)\(p\)/\1/' | head -n 1)
+        [ -b "$DEV" ] || {
+            echo "Error: Unable to determine block device of ONIE install"
+            return 1
+        }
+    fi
     visit_parted $DEV do_handle_disk do_handle_partitions || return 1
     partition_gpt $(get_free_space) || return 1
diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/lib/install/x86-64-accton-as7712-32x-r0.sh b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/lib/install/x86-64-accton-as7712-32x-r0.sh
index b964838f..f30823a2 100644
--- a/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/lib/install/x86-64-accton-as7712-32x-r0.sh
+++ b/packages/platforms/accton/x86-64/x86-64-accton-as7712-32x/platform-config/r0/src/lib/install/x86-64-accton-as7712-32x-r0.sh
@@ -11,5 +11,5 @@ platform_installer() {
     # Standard isntallation to an available GPT partition
-    installer_standard_gpt_install /dev/sdb
+    installer_standard_gpt_install
 }
diff --git a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/src/lib/install/x86-64-accton-as7716-32x-r0.sh b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/src/lib/install/x86-64-accton-as7716-32x-r0.sh
index b964838f..f30823a2 100644
--- a/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/src/lib/install/x86-64-accton-as7716-32x-r0.sh
+++ b/packages/platforms/accton/x86-64/x86-64-accton-as7716-32x/platform-config/r0/src/lib/install/x86-64-accton-as7716-32x-r0.sh
@@ -11,5 +11,5 @@ platform_installer() {
     # Standard isntallation to an available GPT partition
-    installer_standard_gpt_install /dev/sdb
+    installer_standard_gpt_install
 }
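The pipeline added to installer_standard_gpt_install above is the heart of this patch: blkid prints one line per known filesystem, grep/awk isolate the device node of the ONIE-BOOT partition (e.g. "/dev/sda3:"), the first sed strips the partition number and everything after it, the second sed drops the "p" that eMMC-style names insert between disk and partition number, and the trailing head -n 1 guards against blkid reporting more than one ONIE-BOOT label. A standalone sketch (illustration only; derive_onie_disk is a hypothetical helper fed canned blkid-style input, not part of the patch) shows both naming cases:

    # Run the same grep/awk/sed chain as the installer over a canned line.
    derive_onie_disk() {
        printf '%s\n' "$1" | grep ONIE-BOOT | awk '{print $1}' \
            | sed -e 's/[1-9][0-9]*:.*$//' \
            | sed -e 's/\([0-9]\)\(p\)/\1/' | head -n 1
    }

    derive_onie_disk '/dev/sda3: LABEL="ONIE-BOOT"'      # prints /dev/sda
    derive_onie_disk '/dev/mmcblk0p2: LABEL="ONIE-BOOT"' # prints /dev/mmcblk0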
NOS" return 1 } fi From 2c74660aa97b738ec9157666c8b9b5a980f199ef Mon Sep 17 00:00:00 2001 From: Lewis Kang Date: Fri, 13 May 2016 11:18:19 +0800 Subject: [PATCH 6/6] if error -> exit 1 --- builds/amd64/installer/legacy/builds/amd64-installer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builds/amd64/installer/legacy/builds/amd64-installer.sh b/builds/amd64/installer/legacy/builds/amd64-installer.sh index c2427083..5eac7de1 100644 --- a/builds/amd64/installer/legacy/builds/amd64-installer.sh +++ b/builds/amd64/installer/legacy/builds/amd64-installer.sh @@ -281,7 +281,7 @@ installer_standard_gpt_install() DEV=$(blkid | grep ONIE-BOOT | awk '{print $1}' | sed -e 's/[1-9][0-9]*:.*$//' | sed -e 's/\([0-9]\)\(p\)/\1/' | head -n 1) [ -b "$DEV" ] || { echo "Error: Unable to determine the block device to install NOS" - return 1 + exit 1 } fi