diff --git a/packages/base/any/kernels/3.16-lts/configs/x86_64-all/.gitignore b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/.gitignore
new file mode 100644
index 00000000..5dbdc5b9
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/.gitignore
@@ -0,0 +1,3 @@
+kernel-3.16*
+linux-*
+
diff --git a/packages/base/any/kernels/3.16-lts/configs/x86_64-all/Makefile b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/Makefile
new file mode 100644
index 00000000..2936639a
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/Makefile
@@ -0,0 +1,37 @@
+############################################################
+#
+#
+# Copyright 2015 Big Switch Networks, Inc.
+#
+# Licensed under the Eclipse Public License, Version 1.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+# either express or implied. See the License for the specific
+# language governing permissions and limitations under the
+# License.
+#
+#
+############################################################
+THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST))))
+include $(ONL)/make/config.mk
+
+export ARCH := x86_64
+ifndef K_TARGET_DIR
+K_TARGET_DIR := $(THIS_DIR)
+endif
+
+include ../../kconfig.mk
+K_CONFIG := x86_64-all.config
+K_BUILD_TARGET := bzImage
+K_COPY_SRC := arch/x86/boot/bzImage
+ifndef K_COPY_DST
+K_COPY_DST := kernel-3.16-lts-x86_64-all
+endif
+
+include $(ONL)/make/kbuild.mk
diff --git a/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config
new file mode 100644
index 00000000..94506877
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config
@@ -0,0 +1,3567 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/x86_64 3.16.7-ckt25 Kernel Configuration
+#
+CONFIG_64BIT=y
+CONFIG_X86_64=y
+CONFIG_X86=y
+CONFIG_INSTRUCTION_DECODER=y
+CONFIG_OUTPUT_FORMAT="elf64-x86-64"
+CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig"
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_MMU=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
+CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
+CONFIG_ZONE_DMA32=y
+CONFIG_AUDIT_ARCH=y
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_X86_64_SMP=y
+CONFIG_X86_HT=y
+CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11"
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_EXTABLE_SORT=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+# CONFIG_COMPILE_TEST is not set
+CONFIG_LOCALVERSION="-OpenNetworkLinux"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_XZ=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_HAVE_KERNEL_LZ4=y
+# CONFIG_KERNEL_GZIP is not set
+CONFIG_KERNEL_BZIP2=y
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_XZ is not set
+# CONFIG_KERNEL_LZO is not set
+# CONFIG_KERNEL_LZ4 is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_CROSS_MEMORY_ATTACH=y
+CONFIG_FHANDLE=y
+CONFIG_USELIB=y
+CONFIG_AUDIT=y
+CONFIG_HAVE_ARCH_AUDITSYSCALL=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_WATCH=y
+CONFIG_AUDIT_TREE=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_IRQ_DOMAIN=y
+# CONFIG_IRQ_DOMAIN_DEBUG is not set
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_ARCH_CLOCKSOURCE_DATA=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+CONFIG_NO_HZ_IDLE=y
+# CONFIG_NO_HZ_FULL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+#
+# CPU/Task time and stats accounting
+#
+CONFIG_TICK_CPU_ACCOUNTING=y
+# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
+# CONFIG_IRQ_TIME_ACCOUNTING is not set
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+CONFIG_RCU_STALL_COMMON=y
+# CONFIG_RCU_USER_QS is not set
+CONFIG_RCU_FANOUT=64
+CONFIG_RCU_FANOUT_LEAF=16
+# CONFIG_RCU_FANOUT_EXACT is not set
+CONFIG_RCU_FAST_NO_HZ=y
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_RCU_NOCB_CPU is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=17
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_ARCH_SUPPORTS_INT128=y
+CONFIG_ARCH_WANTS_PROT_NUMA_PROT_NONE=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_MEMCG=y
+# CONFIG_MEMCG_DISABLED is not set
+CONFIG_MEMCG_SWAP=y
+CONFIG_MEMCG_SWAP_ENABLED=y
+CONFIG_MEMCG_KMEM=y
+# CONFIG_CGROUP_HUGETLB is not set
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_CFS_BANDWIDTH is not set
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_BLK_CGROUP=y
+# CONFIG_DEBUG_BLK_CGROUP is not set
+# CONFIG_CHECKPOINT_RESTORE is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+# CONFIG_SYSFS_DEPRECATED is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_XZ=y
+CONFIG_RD_LZO=y
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_HAVE_UID16=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_HAVE_PCSPKR_PLATFORM=y
+CONFIG_EXPERT=y
+CONFIG_UID16=y
+CONFIG_SGETMASK_SYSCALL=y
+CONFIG_SYSFS_SYSCALL=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_SYSTEM_TRUSTED_KEYRING is not set
+# CONFIG_PROFILING is not set
+CONFIG_TRACEPOINTS=y
+CONFIG_HAVE_OPROFILE=y
+CONFIG_OPROFILE_NMI_TIMER=y
+# CONFIG_KPROBES is not set
+# CONFIG_JUMP_LABEL is not set
+# CONFIG_UPROBES is not set
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
+CONFIG_HAVE_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_PERF_EVENTS_NMI=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
+CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_SECCOMP_FILTER=y
+CONFIG_HAVE_CC_STACKPROTECTOR=y
+# CONFIG_CC_STACKPROTECTOR is not set
+CONFIG_CC_STACKPROTECTOR_NONE=y
+# CONFIG_CC_STACKPROTECTOR_REGULAR is not set
+# CONFIG_CC_STACKPROTECTOR_STRONG is not set
+CONFIG_HAVE_CONTEXT_TRACKING=y
+CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_HAVE_ARCH_SOFT_DIRTY=y
+CONFIG_MODULES_USE_ELF_RELA=y
+CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_COMPAT_OLD_SIGACTION=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_MODULE_SIG is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_BLK_DEV_BSG=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_BLK_DEV_INTEGRITY=y
+# CONFIG_BLK_DEV_THROTTLING is not set
+# CONFIG_BLK_CMDLINE_PARSER is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ACORN_PARTITION=y
+CONFIG_ACORN_PARTITION_CUMANA=y
+CONFIG_ACORN_PARTITION_EESOX=y
+CONFIG_ACORN_PARTITION_ICS=y
+CONFIG_ACORN_PARTITION_ADFS=y
+CONFIG_ACORN_PARTITION_POWERTEC=y
+CONFIG_ACORN_PARTITION_RISCIX=y
+# CONFIG_AIX_PARTITION is not set
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+# CONFIG_LDM_DEBUG is not set
+CONFIG_SGI_PARTITION=y
+CONFIG_ULTRIX_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+# CONFIG_CMDLINE_PARTITION is not set
+CONFIG_BLOCK_COMPAT=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+CONFIG_INLINE_READ_UNLOCK=y
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+CONFIG_INLINE_WRITE_UNLOCK=y
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_ARCH_USE_QUEUE_RWLOCK=y
+CONFIG_QUEUE_RWLOCK=y
+CONFIG_FREEZER=y
+
+#
+# Processor type and features
+#
+CONFIG_ZONE_DMA=y
+CONFIG_SMP=y
+CONFIG_X86_MPPARSE=y
+# CONFIG_X86_EXTENDED_PLATFORM is not set
+# CONFIG_X86_INTEL_LPSS is not set
+CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+# CONFIG_HYPERVISOR_GUEST is not set
+CONFIG_NO_BOOTMEM=y
+CONFIG_MEMTEST=y
+# CONFIG_MK8 is not set
+# CONFIG_MPSC is not set
+# CONFIG_MCORE2 is not set
+# CONFIG_MATOM is not set
+CONFIG_GENERIC_CPU=y
+CONFIG_X86_INTERNODE_CACHE_SHIFT=6
+CONFIG_X86_L1_CACHE_SHIFT=6
+CONFIG_X86_TSC=y
+CONFIG_X86_CMPXCHG64=y
+CONFIG_X86_CMOV=y
+CONFIG_X86_MINIMUM_CPU_FAMILY=64
+CONFIG_X86_DEBUGCTLMSR=y
+# CONFIG_PROCESSOR_SELECT is not set
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR=y
+CONFIG_HPET_TIMER=y
+CONFIG_HPET_EMULATE_RTC=y
+CONFIG_DMI=y
+CONFIG_GART_IOMMU=y
+CONFIG_CALGARY_IOMMU=y
+CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
+CONFIG_SWIOTLB=y
+CONFIG_IOMMU_HELPER=y
+# CONFIG_MAXSMP is not set
+CONFIG_NR_CPUS=512
+CONFIG_SCHED_SMT=y
+CONFIG_SCHED_MC=y
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
+CONFIG_X86_UP_APIC_MSI=y
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_INTEL=y
+CONFIG_X86_MCE_AMD=y
+CONFIG_X86_MCE_THRESHOLD=y
+# CONFIG_X86_MCE_INJECT is not set
+CONFIG_X86_THERMAL_VECTOR=y
+# CONFIG_X86_16BIT is not set
+# CONFIG_I8K is not set
+# CONFIG_MICROCODE is not set
+# CONFIG_MICROCODE_INTEL_EARLY is not set
+# CONFIG_MICROCODE_AMD_EARLY is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_DIRECT_GBPAGES=y
+# CONFIG_NUMA is not set
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ARCH_MEMORY_PROBE=y
+CONFIG_ARCH_PROC_KCORE_TEXT=y
+CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+CONFIG_MEMORY_ISOLATION=y
+CONFIG_HAVE_BOOTMEM_INFO_NODE=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_SPARSE=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
+CONFIG_COMPACTION=y
+CONFIG_MIGRATION=y
+CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_NEED_BOUNCE_POOL=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_HWPOISON_INJECT=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+# CONFIG_CLEANCACHE is not set
+# CONFIG_FRONTSWAP is not set
+# CONFIG_CMA is not set
+# CONFIG_ZBUD is not set
+# CONFIG_ZSMALLOC is not set
+CONFIG_GENERIC_EARLY_IOREMAP=y
+# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
+CONFIG_X86_RESERVE_LOW=64
+CONFIG_MTRR=y
+CONFIG_MTRR_SANITIZER=y
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0
+CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
+CONFIG_X86_PAT=y
+CONFIG_ARCH_USES_PG_UNCACHED=y
+CONFIG_ARCH_RANDOM=y
+CONFIG_X86_SMAP=y
+# CONFIG_EFI is not set
+CONFIG_SECCOMP=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_PHYSICAL_START=0x1000000
+CONFIG_RELOCATABLE=y
+# CONFIG_RANDOMIZE_BASE is not set
+CONFIG_PHYSICAL_ALIGN=0x1000000
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
+# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
+# CONFIG_COMPAT_VDSO is not set
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+
+#
+# Power management and ACPI options
+#
+# CONFIG_SUSPEND is not set
+# CONFIG_HIBERNATION is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ACPI=y
+# CONFIG_ACPI_PROCFS_POWER is not set
+# CONFIG_ACPI_EC_DEBUGFS is not set
+CONFIG_ACPI_AC=y
+CONFIG_ACPI_BATTERY=y
+CONFIG_ACPI_BUTTON=y
+CONFIG_ACPI_FAN=y
+# CONFIG_ACPI_DOCK is not set
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_HOTPLUG_CPU=y
+# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set
+CONFIG_ACPI_THERMAL=y
+# CONFIG_ACPI_CUSTOM_DSDT is not set
+# CONFIG_ACPI_INITRD_TABLE_OVERRIDE is not set
+# CONFIG_ACPI_DEBUG is not set
+# CONFIG_ACPI_PCI_SLOT is not set
+CONFIG_X86_PM_TIMER=y
+CONFIG_ACPI_CONTAINER=y
+# CONFIG_ACPI_HOTPLUG_MEMORY is not set
+# CONFIG_ACPI_SBS is not set
+# CONFIG_ACPI_HED is not set
+CONFIG_ACPI_CUSTOM_METHOD=y
+# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
+# CONFIG_ACPI_APEI is not set
+# CONFIG_ACPI_EXTLOG is not set
+# CONFIG_SFI is not set
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+
+#
+# x86 CPU frequency scaling drivers
+#
+# CONFIG_X86_INTEL_PSTATE is not set
+# CONFIG_X86_PCC_CPUFREQ is not set
+# CONFIG_X86_ACPI_CPUFREQ is not set
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+CONFIG_X86_P4_CLOCKMOD=y
+
+#
+# shared options
+#
+CONFIG_X86_SPEEDSTEP_LIB=y
+
+#
+# CPU Idle
+#
+CONFIG_CPU_IDLE=y
+# CONFIG_CPU_IDLE_MULTIPLE_DRIVERS is not set
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
+CONFIG_INTEL_IDLE=y
+
+#
+# Memory power savings
+#
+CONFIG_I7300_IDLE_IOAT_CHANNEL=y
+CONFIG_I7300_IDLE=y
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_CNB20LE_QUIRK is not set
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCIEAER=y
+# CONFIG_PCIE_ECRC is not set
+CONFIG_PCIEAER_INJECT=y
+CONFIG_PCIEASPM=y
+# CONFIG_PCIEASPM_DEBUG is not set
+CONFIG_PCIEASPM_DEFAULT=y
+# CONFIG_PCIEASPM_POWERSAVE is not set
+# CONFIG_PCIEASPM_PERFORMANCE is not set
+CONFIG_PCI_MSI=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+# CONFIG_PCI_STUB is not set
+CONFIG_HT_IRQ=y
+CONFIG_PCI_ATS=y
+CONFIG_PCI_IOV=y
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+CONFIG_PCI_IOAPIC=y
+CONFIG_PCI_LABEL=y
+
+#
+# PCI host controller drivers
+#
+CONFIG_ISA_DMA_API=y
+CONFIG_AMD_NB=y
+CONFIG_PCCARD=y
+CONFIG_PCMCIA=y
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_CARDBUS=y
+
+#
+# PC-card bridges
+#
+# CONFIG_YENTA is not set
+CONFIG_PD6729=y
+CONFIG_I82092=y
+CONFIG_PCCARD_NONSTATIC=y
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_HOTPLUG_PCI_ACPI is not set
+CONFIG_HOTPLUG_PCI_CPCI=y
+CONFIG_HOTPLUG_PCI_CPCI_ZT5550=y
+CONFIG_HOTPLUG_PCI_CPCI_GENERIC=y
+CONFIG_HOTPLUG_PCI_SHPC=y
+# CONFIG_RAPIDIO is not set
+# CONFIG_X86_SYSFB is not set
+
+#
+# Executable file formats / Emulations
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_COMPAT_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
+# CONFIG_HAVE_AOUT is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_COREDUMP=y
+CONFIG_IA32_EMULATION=y
+CONFIG_IA32_AOUT=y
+# CONFIG_X86_X32 is not set
+CONFIG_COMPAT=y
+CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
+CONFIG_SYSVIPC_COMPAT=y
+CONFIG_KEYS_COMPAT=y
+CONFIG_X86_DEV_DMA_OPS=y
+CONFIG_IOSF_MBI=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_DIAG is not set
+CONFIG_UNIX=y
+# CONFIG_UNIX_DIAG is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_FIB_TRIE_STATS=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_ROUTE_CLASSID=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+CONFIG_NET_IP_TUNNEL=y
+# CONFIG_IP_MROUTE is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_INET_UDP_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=y
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_TCP_CONG_WESTWOOD=y
+CONFIG_TCP_CONG_HTCP=y
+CONFIG_TCP_CONG_HSTCP=y
+CONFIG_TCP_CONG_HYBLA=y
+CONFIG_TCP_CONG_VEGAS=y
+CONFIG_TCP_CONG_SCALABLE=y
+CONFIG_TCP_CONG_LP=y
+CONFIG_TCP_CONG_VENO=y
+CONFIG_TCP_CONG_YEAH=y
+CONFIG_TCP_CONG_ILLINOIS=y
+# CONFIG_DEFAULT_BIC is not set
+CONFIG_DEFAULT_CUBIC=y
+# CONFIG_DEFAULT_HTCP is not set
+# CONFIG_DEFAULT_HYBLA is not set
+# CONFIG_DEFAULT_VEGAS is not set
+# CONFIG_DEFAULT_VENO is not set
+# CONFIG_DEFAULT_WESTWOOD is not set
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_INET6_XFRM_TUNNEL=y
+CONFIG_INET6_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y
+# CONFIG_IPV6_VTI is not set
+CONFIG_IPV6_SIT=y
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=y
+# CONFIG_IPV6_GRE is not set
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NET_PTP_CLASSIFY=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=y
+CONFIG_NETFILTER_NETLINK_ACCT=y
+CONFIG_NETFILTER_NETLINK_QUEUE=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+# CONFIG_NF_CONNTRACK_TIMEOUT is not set
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_LABELS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_GRE=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_BROADCAST=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_SNMP=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_CT_NETLINK_TIMEOUT=y
+CONFIG_NF_CT_NETLINK_HELPER=y
+CONFIG_NETFILTER_NETLINK_QUEUE_CT=y
+CONFIG_NF_NAT=y
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_NF_NAT_PROTO_DCCP=y
+CONFIG_NF_NAT_PROTO_UDPLITE=y
+CONFIG_NF_NAT_PROTO_SCTP=y
+CONFIG_NF_NAT_AMANDA=y
+CONFIG_NF_NAT_FTP=y
+CONFIG_NF_NAT_IRC=y
+CONFIG_NF_NAT_SIP=y
+CONFIG_NF_NAT_TFTP=y
+# CONFIG_NF_TABLES is not set
+CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+CONFIG_NETFILTER_XT_SET=y
+
+#
+# Xtables targets
+#
+CONFIG_NETFILTER_XT_TARGET_AUDIT=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
+CONFIG_NETFILTER_XT_TARGET_DSCP=y
+CONFIG_NETFILTER_XT_TARGET_HL=y
+CONFIG_NETFILTER_XT_TARGET_HMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NETMAP=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_RATEEST=y
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=y
+
+#
+# Xtables matches
+#
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_CGROUP=y
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_CPU=y
+CONFIG_NETFILTER_XT_MATCH_DCCP=y
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ECN=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_HL=y
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_IPVS=y
+CONFIG_NETFILTER_XT_MATCH_L2TP=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_NFACCT=y
+CONFIG_NETFILTER_XT_MATCH_OSF=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_RATEEST=y
+CONFIG_NETFILTER_XT_MATCH_REALM=y
+CONFIG_NETFILTER_XT_MATCH_RECENT=y
+CONFIG_NETFILTER_XT_MATCH_SCTP=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_SET=y
+CONFIG_IP_SET_MAX=256
+CONFIG_IP_SET_BITMAP_IP=y
+CONFIG_IP_SET_BITMAP_IPMAC=y
+CONFIG_IP_SET_BITMAP_PORT=y
+CONFIG_IP_SET_HASH_IP=y
+# CONFIG_IP_SET_HASH_IPMARK is not set
+CONFIG_IP_SET_HASH_IPPORT=y
+CONFIG_IP_SET_HASH_IPPORTIP=y
+CONFIG_IP_SET_HASH_IPPORTNET=y
+# CONFIG_IP_SET_HASH_NETPORTNET is not set
+CONFIG_IP_SET_HASH_NET=y
+# CONFIG_IP_SET_HASH_NETNET is not set
+CONFIG_IP_SET_HASH_NETPORT=y
+CONFIG_IP_SET_HASH_NETIFACE=y
+CONFIG_IP_SET_LIST_SET=y
+CONFIG_IP_VS=y
+CONFIG_IP_VS_IPV6=y
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_PROTO_SCTP=y
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=y
+CONFIG_IP_VS_WRR=y
+CONFIG_IP_VS_LC=y
+CONFIG_IP_VS_WLC=y
+CONFIG_IP_VS_LBLC=y
+CONFIG_IP_VS_LBLCR=y
+CONFIG_IP_VS_DH=y
+CONFIG_IP_VS_SH=y
+CONFIG_IP_VS_SED=y
+CONFIG_IP_VS_NQ=y
+
+#
+# IPVS SH scheduler
+#
+CONFIG_IP_VS_SH_TAB_BITS=8
+
+#
+# IPVS application helper
+#
+# CONFIG_IP_VS_FTP is not set
+CONFIG_IP_VS_NFCT=y
+CONFIG_IP_VS_PE_SIP=y
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+# CONFIG_IP_NF_MATCH_RPFILTER is not set
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+# CONFIG_IP_NF_TARGET_SYNPROXY is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT_IPV4=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+# CONFIG_IP_NF_TARGET_NETMAP is not set
+# CONFIG_IP_NF_TARGET_REDIRECT is not set
+CONFIG_NF_NAT_SNMP_BASIC=y
+CONFIG_NF_NAT_PROTO_GRE=y
+CONFIG_NF_NAT_PPTP=y
+CONFIG_NF_NAT_H323=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_CLUSTERIP=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV6=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+# CONFIG_IP6_NF_MATCH_RPFILTER is not set
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_TARGET_HL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+# CONFIG_IP6_NF_TARGET_SYNPROXY is not set
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+# CONFIG_NF_NAT_IPV6 is not set
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_802_3=y
+CONFIG_BRIDGE_EBT_AMONG=y
+CONFIG_BRIDGE_EBT_ARP=y
+CONFIG_BRIDGE_EBT_IP=y
+CONFIG_BRIDGE_EBT_IP6=y
+CONFIG_BRIDGE_EBT_LIMIT=y
+CONFIG_BRIDGE_EBT_MARK=y
+CONFIG_BRIDGE_EBT_PKTTYPE=y
+CONFIG_BRIDGE_EBT_STP=y
+CONFIG_BRIDGE_EBT_VLAN=y
+CONFIG_BRIDGE_EBT_ARPREPLY=y
+CONFIG_BRIDGE_EBT_DNAT=y
+CONFIG_BRIDGE_EBT_MARK_T=y
+CONFIG_BRIDGE_EBT_REDIRECT=y
+CONFIG_BRIDGE_EBT_SNAT=y
+CONFIG_BRIDGE_EBT_LOG=y
+# CONFIG_BRIDGE_EBT_ULOG is not set
+CONFIG_BRIDGE_EBT_NFLOG=y
+# CONFIG_IP_DCCP is not set
+CONFIG_IP_SCTP=y
+# CONFIG_SCTP_DBG_OBJCNT is not set
+CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
+CONFIG_SCTP_COOKIE_HMAC_MD5=y
+# CONFIG_SCTP_COOKIE_HMAC_SHA1 is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+CONFIG_STP=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_HAVE_NET_DSA=y
+CONFIG_VLAN_8021Q=y
+# CONFIG_VLAN_8021Q_GVRP is not set
+# CONFIG_VLAN_8021Q_MVRP is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=y
+CONFIG_LLC2=y
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_OPENVSWITCH is not set
+# CONFIG_VSOCKETS is not set
+CONFIG_NETLINK_MMAP=y
+CONFIG_NETLINK_DIAG=y
+# CONFIG_NET_MPLS_GSO is not set
+# CONFIG_HSR is not set
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_XPS=y
+# CONFIG_CGROUP_NET_PRIO is not set
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_NET_RX_BUSY_POLL=y
+CONFIG_BQL=y
+# CONFIG_BPF_JIT is not set
+CONFIG_NET_FLOW_LIMIT=y
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=y
+CONFIG_NET_DROP_MONITOR=y
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_AF_RXRPC=y
+# CONFIG_AF_RXRPC_DEBUG is not set
+# CONFIG_RXKAD is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+CONFIG_CEPH_LIB=y
+# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
+# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set
+# CONFIG_NFC is not set
+CONFIG_HAVE_BPF_JIT=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER=y
+CONFIG_UEVENT_HELPER_PATH=""
+CONFIG_DEVTMPFS=y
+# CONFIG_DEVTMPFS_MOUNT is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_GENERIC_CPU_DEVICES is not set
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+# CONFIG_DMA_SHARED_BUFFER is not set
+
+#
+# Bus devices
+#
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+# CONFIG_MTD is not set
+CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
+# CONFIG_PARPORT is not set
+CONFIG_PNP=y
+CONFIG_PNP_DEBUG_MESSAGES=y
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_NULL_BLK is not set
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
+CONFIG_BLK_DEV_NBD=y
+# CONFIG_BLK_DEV_NVME is not set
+# CONFIG_BLK_DEV_SKD is not set
+# CONFIG_BLK_DEV_OSD is not set
+CONFIG_BLK_DEV_SX8=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=65536
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_VIRTIO_BLK=y
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_BLK_DEV_RSXX is not set
+
+#
+# Misc devices
+#
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_AD525X_DPOT is not set
+CONFIG_DUMMY_IRQ=y
+# CONFIG_IBM_ASM is not set
+# CONFIG_PHANTOM is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1780 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+CONFIG_TI_DAC7512=y
+# CONFIG_BMP085_I2C is not set
+# CONFIG_BMP085_SPI is not set
+# CONFIG_USB_SWITCH_FSA9480 is not set
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+CONFIG_EEPROM_93CX6=y
+# CONFIG_EEPROM_93XX46 is not set
+CONFIG_EEPROM_SFF_8436=y
+CONFIG_CB710_CORE=y
+# CONFIG_CB710_DEBUG is not set
+CONFIG_CB710_DEBUG_ASSUMPTIONS=y
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+
+#
+# Altera FPGA firmware download module
+#
+# CONFIG_ALTERA_STAPL is not set
+# CONFIG_VMWARE_VMCI is not set
+
+#
+# Intel MIC Host Driver
+#
+# CONFIG_INTEL_MIC_HOST is not set
+
+#
+# Intel MIC Card Driver
+#
+# CONFIG_INTEL_MIC_CARD is not set
+# CONFIG_GENWQE is not set
+# CONFIG_ECHO is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+CONFIG_RAID_ATTRS=y
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+CONFIG_SCSI_NETLINK=y
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+
+#
+# SCSI Transports
+#
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_ISCSI_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=y
+CONFIG_SCSI_SAS_LIBSAS=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SAS_HOST_SMP=y
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_ISCSI_TCP=y
+CONFIG_ISCSI_BOOT_SYSFS=y
+CONFIG_SCSI_CXGB3_ISCSI=y
+CONFIG_SCSI_CXGB4_ISCSI=y
+CONFIG_SCSI_BNX2_ISCSI=y
+CONFIG_SCSI_BNX2X_FCOE=y
+CONFIG_BE2ISCSI=y
+CONFIG_BLK_DEV_3W_XXXX_RAID=y
+CONFIG_SCSI_HPSA=y
+CONFIG_SCSI_3W_9XXX=y
+CONFIG_SCSI_3W_SAS=y
+CONFIG_SCSI_ACARD=y
+CONFIG_SCSI_AACRAID=y
+CONFIG_SCSI_AIC7XXX=y
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+CONFIG_AIC7XXX_DEBUG_ENABLE=y
+CONFIG_AIC7XXX_DEBUG_MASK=0
+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
+CONFIG_SCSI_AIC79XX=y
+CONFIG_AIC79XX_CMDS_PER_DEVICE=32
+CONFIG_AIC79XX_RESET_DELAY_MS=15000
+CONFIG_AIC79XX_DEBUG_ENABLE=y
+CONFIG_AIC79XX_DEBUG_MASK=0
+CONFIG_AIC79XX_REG_PRETTY_PRINT=y
+CONFIG_SCSI_AIC94XX=y
+# CONFIG_AIC94XX_DEBUG is not set
+CONFIG_SCSI_MVSAS=y
+# CONFIG_SCSI_MVSAS_DEBUG is not set
+# CONFIG_SCSI_MVSAS_TASKLET is not set
+CONFIG_SCSI_MVUMI=y
+CONFIG_SCSI_DPT_I2O=y
+CONFIG_SCSI_ADVANSYS=y
+CONFIG_SCSI_ARCMSR=y
+# CONFIG_SCSI_ESAS2R is not set
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=y
+CONFIG_MEGARAID_MAILBOX=y
+CONFIG_MEGARAID_LEGACY=y
+CONFIG_MEGARAID_SAS=y
+CONFIG_SCSI_MPT2SAS=y
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+# CONFIG_SCSI_MPT2SAS_LOGGING is not set
+# CONFIG_SCSI_MPT3SAS is not set
+# CONFIG_SCSI_UFSHCD is not set
+CONFIG_SCSI_HPTIOP=y
+CONFIG_SCSI_BUSLOGIC=y
+# CONFIG_SCSI_FLASHPOINT is not set
+CONFIG_VMWARE_PVSCSI=y
+CONFIG_LIBFC=y
+CONFIG_LIBFCOE=y
+CONFIG_FCOE=y
+CONFIG_FCOE_FNIC=y
+CONFIG_SCSI_DMX3191D=y
+CONFIG_SCSI_EATA=y
+CONFIG_SCSI_EATA_TAGGED_QUEUE=y
+CONFIG_SCSI_EATA_LINKED_COMMANDS=y
+CONFIG_SCSI_EATA_MAX_TAGS=16
+CONFIG_SCSI_FUTURE_DOMAIN=y
+CONFIG_SCSI_GDTH=y
+CONFIG_SCSI_ISCI=y
+CONFIG_SCSI_IPS=y
+CONFIG_SCSI_INITIO=y
+CONFIG_SCSI_INIA100=y
+CONFIG_SCSI_STEX=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
+CONFIG_SCSI_SYM53C8XX_MMIO=y
+CONFIG_SCSI_IPR=y
+# CONFIG_SCSI_IPR_TRACE is not set
+# CONFIG_SCSI_IPR_DUMP is not set
+CONFIG_SCSI_QLOGIC_1280=y
+CONFIG_SCSI_QLA_FC=y
+CONFIG_SCSI_QLA_ISCSI=y
+CONFIG_SCSI_LPFC=y
+# CONFIG_SCSI_LPFC_DEBUG_FS is not set
+CONFIG_SCSI_DC395x=y
+CONFIG_SCSI_DC390T=y
+CONFIG_SCSI_DEBUG=y
+CONFIG_SCSI_PMCRAID=y
+CONFIG_SCSI_PM8001=y
+# CONFIG_SCSI_SRP is not set
+CONFIG_SCSI_BFA_FC=y
+# CONFIG_SCSI_VIRTIO is not set
+# CONFIG_SCSI_CHELSIO_FCOE is not set
+CONFIG_SCSI_LOWLEVEL_PCMCIA=y
+# CONFIG_PCMCIA_AHA152X is not set
+# CONFIG_PCMCIA_FDOMAIN is not set
+# CONFIG_PCMCIA_QLOGIC is not set
+# CONFIG_PCMCIA_SYM53C500 is not set
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=y
+CONFIG_SCSI_DH_HP_SW=y
+CONFIG_SCSI_DH_EMC=y
+CONFIG_SCSI_DH_ALUA=y
+CONFIG_SCSI_OSD_INITIATOR=y
+CONFIG_SCSI_OSD_ULD=y
+CONFIG_SCSI_OSD_DPRINT_SENSE=1
+# CONFIG_SCSI_OSD_DEBUG is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_ATA_ACPI=y
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+# CONFIG_SATA_INIC162X is not set
+CONFIG_SATA_ACARD_AHCI=y
+CONFIG_SATA_SIL24=y
+CONFIG_ATA_SFF=y
+
+#
+# SFF controllers with custom DMA interface
+#
+CONFIG_PDC_ADMA=y
+CONFIG_SATA_QSTOR=y
+CONFIG_SATA_SX4=y
+CONFIG_ATA_BMDMA=y
+
+#
+# SATA SFF controllers with BMDMA
+#
+CONFIG_ATA_PIIX=y
+CONFIG_SATA_MV=y
+CONFIG_SATA_NV=y
+CONFIG_SATA_PROMISE=y
+CONFIG_SATA_SIL=y
+CONFIG_SATA_SIS=y
+CONFIG_SATA_SVW=y
+CONFIG_SATA_ULI=y
+CONFIG_SATA_VIA=y
+CONFIG_SATA_VITESSE=y
+
+#
+# PATA SFF controllers with BMDMA
+#
+CONFIG_PATA_ALI=y
+CONFIG_PATA_AMD=y
+CONFIG_PATA_ARTOP=y
+CONFIG_PATA_ATIIXP=y
+CONFIG_PATA_ATP867X=y
+CONFIG_PATA_CMD64X=y
+# CONFIG_PATA_CYPRESS is not set
+CONFIG_PATA_EFAR=y
+CONFIG_PATA_HPT366=y
+CONFIG_PATA_HPT37X=y
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+CONFIG_PATA_IT8213=y
+CONFIG_PATA_IT821X=y
+CONFIG_PATA_JMICRON=y
+CONFIG_PATA_MARVELL=y
+CONFIG_PATA_NETCELL=y
+CONFIG_PATA_NINJA32=y
+CONFIG_PATA_NS87415=y
+CONFIG_PATA_OLDPIIX=y
+# CONFIG_PATA_OPTIDMA is not set
+CONFIG_PATA_PDC2027X=y
+CONFIG_PATA_PDC_OLD=y
+# CONFIG_PATA_RADISYS is not set
+CONFIG_PATA_RDC=y
+CONFIG_PATA_SCH=y
+CONFIG_PATA_SERVERWORKS=y
+CONFIG_PATA_SIL680=y
+CONFIG_PATA_SIS=y
+CONFIG_PATA_TOSHIBA=y
+CONFIG_PATA_TRIFLEX=y
+CONFIG_PATA_VIA=y
+# CONFIG_PATA_WINBOND is not set
+
+#
+# PIO-only SFF controllers
+#
+# CONFIG_PATA_CMD640_PCI is not set
+CONFIG_PATA_MPIIX=y
+CONFIG_PATA_NS87410=y
+# CONFIG_PATA_OPTI is not set
+CONFIG_PATA_PCMCIA=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_RZ1000=y
+
+#
+# Generic fallback / legacy drivers
+#
+# CONFIG_PATA_ACPI is not set
+CONFIG_ATA_GENERIC=y
+# CONFIG_PATA_LEGACY is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+# CONFIG_MD_AUTODETECT is not set
+# CONFIG_MD_LINEAR is not set
+# CONFIG_MD_RAID0 is not set
+# CONFIG_MD_RAID1 is not set
+# CONFIG_MD_RAID10 is not set
+# CONFIG_MD_RAID456 is not set
+# CONFIG_MD_MULTIPATH is not set
+# CONFIG_MD_FAULTY is not set
+# CONFIG_BCACHE is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_CRYPT=y
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_THIN_PROVISIONING is not set
+# CONFIG_DM_CACHE is not set
+# CONFIG_DM_ERA is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_RAID is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+# CONFIG_DM_UEVENT is not set
+# CONFIG_DM_FLAKEY is not set
+# CONFIG_DM_VERITY is not set
+# CONFIG_DM_SWITCH is not set
+# CONFIG_TARGET_CORE is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+CONFIG_FIREWIRE=y
+CONFIG_FIREWIRE_OHCI=y
+CONFIG_FIREWIRE_SBP2=y
+CONFIG_FIREWIRE_NET=y
+CONFIG_FIREWIRE_NOSY=y
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+CONFIG_DUMMY=y
+# CONFIG_EQUALIZER is not set
+# CONFIG_NET_FC is not set
+# CONFIG_NET_TEAM is not set
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+# CONFIG_VXLAN is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_VIRTIO_NET=y
+# CONFIG_NLMON is not set
+# CONFIG_ARCNET is not set
+
+#
+# CAIF transport drivers
+#
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
+# CONFIG_NET_DSA_MV88E6131 is not set
+# CONFIG_NET_DSA_MV88E6123_61_65 is not set
+CONFIG_ETHERNET=y
+CONFIG_MDIO=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_ALTERA_TSE is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_XGENE is not set
+CONFIG_NET_VENDOR_ARC=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+CONFIG_NET_VENDOR_BROADCOM=y
+CONFIG_B44=y
+CONFIG_B44_PCI_AUTOSELECT=y
+CONFIG_B44_PCICORE_AUTOSELECT=y
+CONFIG_B44_PCI=y
+CONFIG_BNX2=y
+CONFIG_CNIC=y
+CONFIG_TIGON3=y
+CONFIG_BNX2X=y
+CONFIG_BNX2X_SRIOV=y
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_CALXEDA_XGMAC is not set
+CONFIG_NET_VENDOR_CHELSIO=y
+# CONFIG_CHELSIO_T1 is not set
+CONFIG_CHELSIO_T3=y
+CONFIG_CHELSIO_T4=y
+CONFIG_CHELSIO_T4VF=y
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_CX_ECAT is not set
+# CONFIG_DNET is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_FUJITSU is not set
+# CONFIG_NET_VENDOR_HP is not set
+CONFIG_NET_VENDOR_INTEL=y
+# CONFIG_E100 is not set
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGB_HWMON=y
+CONFIG_IGBVF=y
+CONFIG_IXGB=y
+CONFIG_IXGBE=y
+CONFIG_IXGBE_HWMON=y
+CONFIG_IXGBEVF=y
+# CONFIG_I40E is not set
+# CONFIG_I40EVF is not set
+CONFIG_NET_VENDOR_I825XX=y
+# CONFIG_IP1000 is not set
+# CONFIG_JME is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_NET_VENDOR_MELLANOX=y
+# CONFIG_MLX4_EN is not set
+# CONFIG_MLX4_CORE is not set
+# CONFIG_MLX5_CORE is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+CONFIG_NET_VENDOR_MICROCHIP=y
+CONFIG_ENC28J60=y
+CONFIG_ENC28J60_WRITEVERIFY=y
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_ETHOC is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+CONFIG_NET_VENDOR_REALTEK=y
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+CONFIG_R8169=y
+# CONFIG_SH_ETH is not set
+# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_NET_VENDOR_SAMSUNG=y
+# CONFIG_SXGBE_ETH is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_SFC is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_NET_VENDOR_WIZNET=y
+# CONFIG_WIZNET_W5100 is not set
+# CONFIG_WIZNET_W5300 is not set
+# CONFIG_NET_VENDOR_XIRCOM is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_NET_SB1000 is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_AT803X_PHY is not set
+# CONFIG_AMD_PHY is not set
+CONFIG_MARVELL_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_BROADCOM_PHY=y
+# CONFIG_BCM7XXX_PHY is not set
+# CONFIG_BCM87XX_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+CONFIG_REALTEK_PHY=y
+CONFIG_NATIONAL_PHY=y
+CONFIG_STE10XP=y
+CONFIG_LSI_ET1011C_PHY=y
+CONFIG_MICREL_PHY=y
+CONFIG_FIXED_PHY=y
+CONFIG_MDIO_BITBANG=y
+# CONFIG_MDIO_GPIO is not set
+# CONFIG_MICREL_KS8995MA is not set
+CONFIG_PPP=y
+# CONFIG_PPP_BSDCOMP is not set
+# CONFIG_PPP_DEFLATE is not set
+# CONFIG_PPP_FILTER is not set
+# CONFIG_PPP_MPPE is not set
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPPOE is not set
+# CONFIG_PPP_ASYNC is not set
+# CONFIG_PPP_SYNC_TTY is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=y
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_RTL8152 is not set
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_AX8817X is not set
+# CONFIG_USB_NET_AX88179_178A is not set
+CONFIG_USB_NET_CDCETHER=y
+# CONFIG_USB_NET_CDC_EEM is not set
+CONFIG_USB_NET_CDC_NCM=y
+# CONFIG_USB_NET_HUAWEI_CDC_NCM is not set
+# CONFIG_USB_NET_CDC_MBIM is not set
+# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SR9700 is not set
+# CONFIG_USB_NET_SR9800 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
+# CONFIG_USB_NET_GL620A is not set
+CONFIG_USB_NET_NET1080=y
+# CONFIG_USB_NET_PLUSB is not set
+# CONFIG_USB_NET_MCS7830 is not set
+# CONFIG_USB_NET_RNDIS_HOST is not set
+CONFIG_USB_NET_CDC_SUBSET=y
+# CONFIG_USB_ALI_M5632 is not set
+# CONFIG_USB_AN2720 is not set
+CONFIG_USB_BELKIN=y
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_EPSON2888 is not set
+# CONFIG_USB_KC2190 is not set
+CONFIG_USB_NET_ZAURUS=y
+# CONFIG_USB_NET_CX82310_ETH is not set
+# CONFIG_USB_NET_KALMIA is not set
+# CONFIG_USB_NET_QMI_WWAN is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_USB_SIERRA_NET is not set
+# CONFIG_USB_VL600 is not set
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_VMXNET3 is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+CONFIG_INPUT_POLLDEV=y
+CONFIG_INPUT_SPARSEKMAP=y
+# CONFIG_INPUT_MATRIXKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_ROCKETPORT is not set
+# CONFIG_CYCLADES is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_SYNCLINK is not set
+# CONFIG_SYNCLINKMP is not set
+# CONFIG_SYNCLINK_GT is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_ISI is not set
+# CONFIG_N_HDLC is not set
+# CONFIG_N_GSM is not set
+# CONFIG_TRACE_SINK is not set
+# CONFIG_DEVKMEM is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_EARLYCON=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
+CONFIG_SERIAL_8250_PNP=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_CS=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+CONFIG_SERIAL_8250_RSA=y
+# CONFIG_SERIAL_8250_DW is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+CONFIG_SERIAL_MFD_HSU=y
+# CONFIG_SERIAL_MFD_HSU_CONSOLE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_JSM=y
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_SC16IS7XX is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_IFX6X60 is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_SERIAL_FSL_LPUART is not set
+# CONFIG_TTY_PRINTK is not set
+CONFIG_HVC_DRIVER=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_TIMERIOMEM=y
+CONFIG_HW_RANDOM_INTEL=y
+CONFIG_HW_RANDOM_AMD=y
+CONFIG_HW_RANDOM_VIA=y
+# CONFIG_HW_RANDOM_VIRTIO is not set
+CONFIG_NVRAM=y
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# PCMCIA character devices
+#
+CONFIG_SYNCLINK_CS=y
+CONFIG_CARDMAN_4000=y
+CONFIG_CARDMAN_4040=y
+CONFIG_IPWIRELESS=y
+# CONFIG_MWAVE is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HPET is not set
+# CONFIG_HANGCHECK_TIMER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+
+#
+# Multiplexer I2C Chip support
+#
+CONFIG_I2C_MUX_GPIO=y
+CONFIG_I2C_MUX_PCA9541=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+CONFIG_I2C_ALGOPCA=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+CONFIG_I2C_I801=y
+CONFIG_I2C_ISCH=y
+CONFIG_I2C_ISMT=y
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# ACPI drivers
+#
+# CONFIG_I2C_SCMI is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_CBUS_GPIO is not set
+# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_PCA_PLATFORM=y
+# CONFIG_I2C_PXA_PCI is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PXA2XX is not set
+# CONFIG_SPI_PXA2XX_PCI is not set
+# CONFIG_SPI_SC18IS602 is not set
+# CONFIG_SPI_XCOMM is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_SPMI is not set
+# CONFIG_HSI is not set
+
+#
+# PPS support
+#
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+# CONFIG_PPS_CLIENT_LDISC is not set
+# CONFIG_PPS_CLIENT_GPIO is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_DEVRES=y
+CONFIG_GPIO_ACPI=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_GENERIC=y
+CONFIG_GPIO_MAX730X=y
+
+#
+# Memory mapped GPIO drivers:
+#
+CONFIG_GPIO_GENERIC_PLATFORM=y
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_F7188X is not set
+# CONFIG_GPIO_SCH311X is not set
+CONFIG_GPIO_SCH=y
+# CONFIG_GPIO_ICH is not set
+# CONFIG_GPIO_VX855 is not set
+# CONFIG_GPIO_LYNXPOINT is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+CONFIG_GPIO_PCA953X=y
+# CONFIG_GPIO_PCA953X_IRQ is not set
+CONFIG_GPIO_PCF857X=y
+# CONFIG_GPIO_SX150X is not set
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_AMD8111 is not set
+# CONFIG_GPIO_INTEL_MID is not set
+# CONFIG_GPIO_ML_IOH is not set
+# CONFIG_GPIO_RDC321X is not set
+
+#
+# SPI GPIO expanders:
+#
+CONFIG_GPIO_MAX7301=y
+CONFIG_GPIO_MC33880=y
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# LPC GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+
+#
+# USB GPIO expanders:
+#
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_BQ24190 is not set
+# CONFIG_CHARGER_BQ24735 is not set
+# CONFIG_CHARGER_SMB347 is not set
+# CONFIG_POWER_RESET is not set
+# CONFIG_POWER_AVS is not set
+CONFIG_HWMON=y
+CONFIG_HWMON_VID=y
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_AD7314 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+CONFIG_SENSORS_ADM1021=y
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7310 is not set
+# CONFIG_SENSORS_ADT7410 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_K10TEMP is not set
+# CONFIG_SENSORS_FAM15H_POWER is not set
+# CONFIG_SENSORS_APPLESMC is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_G762 is not set
+CONFIG_SENSORS_GPIO_FAN=y
+# CONFIG_SENSORS_HIH6130 is not set
+CONFIG_SENSORS_CORETEMP=y
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LTC2945 is not set
+CONFIG_SENSORS_LTC4151=y
+CONFIG_SENSORS_LTC4215=y
+# CONFIG_SENSORS_LTC4222 is not set
+CONFIG_SENSORS_LTC4245=y
+# CONFIG_SENSORS_LTC4260 is not set
+CONFIG_SENSORS_LTC4261=y
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX197 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+CONFIG_SENSORS_MAX6650=y
+CONFIG_SENSORS_MAX6620=y
+# CONFIG_SENSORS_MAX6697 is not set
+# CONFIG_SENSORS_HTU21 is not set
+# CONFIG_SENSORS_MCP3021 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+CONFIG_SENSORS_LM75=y
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+CONFIG_SENSORS_LM85=y
+# CONFIG_SENSORS_LM87 is not set
+CONFIG_SENSORS_LM90=y
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LM95234 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_NTC_THERMISTOR is not set
+# CONFIG_SENSORS_NCT6683 is not set
+# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+CONFIG_PMBUS=y
+CONFIG_SENSORS_PMBUS=y
+# CONFIG_SENSORS_ADM1275 is not set
+# CONFIG_SENSORS_LM25066 is not set
+# CONFIG_SENSORS_LTC2978 is not set
+# CONFIG_SENSORS_MAX16064 is not set
+# CONFIG_SENSORS_MAX34440 is not set
+CONFIG_SENSORS_DNI_DPS460=y
+# CONFIG_SENSORS_MAX8688 is not set
+# CONFIG_SENSORS_UCD9000 is not set
+CONFIG_SENSORS_UCD9200=y
+# CONFIG_SENSORS_ZL6100 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SHTC1 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SCH56XX_COMMON is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_ADC128D818 is not set
+# CONFIG_SENSORS_ADS1015 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_INA209 is not set
+# CONFIG_SENSORS_INA2XX is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VIA_CPUTEMP is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+CONFIG_SENSORS_W83781D=y
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+
+#
+# ACPI drivers
+#
+# CONFIG_SENSORS_ACPI_POWER is not set
+# CONFIG_SENSORS_ATK0110 is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+# CONFIG_THERMAL_EMULATION is not set
+# CONFIG_INTEL_POWERCLAMP is not set
+CONFIG_X86_PKG_TEMP_THERMAL=m
+# CONFIG_ACPI_INT3403_THERMAL is not set
+# CONFIG_INTEL_SOC_DTS_THERMAL is not set
+
+#
+# Texas Instruments thermal drivers
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB=y
+CONFIG_SSB_SPROM=y
+CONFIG_SSB_PCIHOST_POSSIBLE=y
+CONFIG_SSB_PCIHOST=y
+# CONFIG_SSB_B43_PCI_BRIDGE is not set
+CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
+CONFIG_SSB_PCMCIAHOST=y
+CONFIG_SSB_SDIOHOST_POSSIBLE=y
+CONFIG_SSB_SDIOHOST=y
+# CONFIG_SSB_SILENT is not set
+# CONFIG_SSB_DEBUG is not set
+CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
+CONFIG_SSB_DRIVER_PCICORE=y
+# CONFIG_SSB_DRIVER_GPIO is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+CONFIG_BCMA=y
+CONFIG_BCMA_HOST_PCI_POSSIBLE=y
+CONFIG_BCMA_HOST_PCI=y
+# CONFIG_BCMA_HOST_SOC is not set
+# CONFIG_BCMA_DRIVER_GMAC_CMN is not set
+# CONFIG_BCMA_DRIVER_GPIO is not set
+# CONFIG_BCMA_DEBUG is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_CS5535 is not set
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_AAT2870_CORE is not set
+# CONFIG_MFD_BCM590XX is not set
+# CONFIG_MFD_AXP20X is not set
+# CONFIG_MFD_CROS_EC is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_DA9063 is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_LPC_ICH is not set
+CONFIG_LPC_SCH=y
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_KEMPLD is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_MAX14577 is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_VIPERBOARD is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RTSX_PCI is not set
+# CONFIG_MFD_RTSX_USB is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SMSC is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP3943 is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TPS65218 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_MFD_TPS65912 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_MFD_TPS80031 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+CONFIG_MFD_WL1273_CORE=y
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_AGP is not set
+# CONFIG_VGA_ARB is not set
+# CONFIG_VGA_SWITCHEROO is not set
+
+#
+# Direct Rendering Manager
+#
+# CONFIG_DRM is not set
+
+#
+# Frame buffer Devices
+#
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+# CONFIG_VGASTATE is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+# CONFIG_VGACON_SOFT_SCROLLBACK is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+
+#
+# HID support
+#
+CONFIG_HID=y
+# CONFIG_HID_BATTERY_STRENGTH is not set
+# CONFIG_HIDRAW is not set
+# CONFIG_UHID is not set
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_ACRUX is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_APPLEIR is not set
+# CONFIG_HID_AUREAL is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CP2112 is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_ELECOM is not set
+# CONFIG_HID_ELO is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_HOLTEK is not set
+# CONFIG_HID_HUION is not set
+# CONFIG_HID_KEYTOUCH is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_ICADE is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LCPOWER is not set
+# CONFIG_HID_LENOVO_TPKBD is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_ROCCAT is not set
+# CONFIG_HID_SAITEK is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_STEELSERIES is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_RMI is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TIVO is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THINGM is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_XINMO is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
+# CONFIG_HID_SENSOR_HUB is not set
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+# CONFIG_USB_HIDDEV is not set
+
+#
+# I2C HID support
+#
+# CONFIG_I2C_HID is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_OTG_FSM is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_XHCI_HCD=y
+# CONFIG_USB_XHCI_PLATFORM is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_HCD_PLATFORM is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_FUSBH200_HCD is not set
+# CONFIG_USB_FOTG210_HCD is not set
+# CONFIG_USB_MAX3421_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PCI=y
+# CONFIG_USB_OHCI_HCD_SSB is not set
+# CONFIG_USB_OHCI_HCD_PLATFORM is not set
+CONFIG_USB_UHCI_HCD=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HCD_BCMA is not set
+# CONFIG_USB_HCD_SSB is not set
+# CONFIG_USB_HCD_TEST_MODE is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_REALTEK is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_STORAGE_ENE_UB6250 is not set
+# CONFIG_USB_UAS is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_DWC3 is not set
+# CONFIG_USB_DWC2 is not set
+# CONFIG_USB_CHIPIDEA is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_SIMPLE is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP210X is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_F81232 is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_METRO is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MXUPORT is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QCAUX is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_XSENS_MT is not set
+# CONFIG_USB_SERIAL_WISHBONE is not set
+# CONFIG_USB_SERIAL_ZTE is not set
+# CONFIG_USB_SERIAL_SSU100 is not set
+# CONFIG_USB_SERIAL_QT2 is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_EHSET_TEST_FIXTURE is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_EZUSB_FX2 is not set
+# CONFIG_USB_HSIC_USB3503 is not set
+
+#
+# USB Physical Layer drivers
+#
+# CONFIG_USB_PHY is not set
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_SAMSUNG_USB2PHY is not set
+# CONFIG_SAMSUNG_USB3PHY is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# CONFIG_USB_GADGET is not set
+# CONFIG_UWB is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_CLKGATE is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PCI=y
+# CONFIG_MMC_RICOH_MMC is not set
+# CONFIG_MMC_SDHCI_ACPI is not set
+CONFIG_MMC_SDHCI_PLTFM=y
+# CONFIG_MMC_WBSD is not set
+# CONFIG_MMC_TIFM_SD is not set
+CONFIG_MMC_SPI=y
+# CONFIG_MMC_SDRICOH_CS is not set
+# CONFIG_MMC_CB710 is not set
+# CONFIG_MMC_VIA_SDMMC is not set
+# CONFIG_MMC_VUB300 is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MMC_USDHI6ROL0 is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_LM3530 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_LP5523 is not set
+# CONFIG_LEDS_LP5562 is not set
+# CONFIG_LEDS_LP8501 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA963X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_INTEL_SS4200 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_LM355x is not set
+
+#
+# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
+#
+# CONFIG_LEDS_BLINKM is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+# CONFIG_LEDS_TRIGGER_ONESHOT is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_CPU is not set
+CONFIG_LEDS_TRIGGER_GPIO=y
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_LEDS_TRIGGER_TRANSIENT is not set
+# CONFIG_LEDS_TRIGGER_CAMERA is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+# CONFIG_EDAC is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_SYSTOHC=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_DS1374=y
+CONFIG_RTC_DRV_DS1672=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_MAX6900=y
+CONFIG_RTC_DRV_RS5C372=y
+CONFIG_RTC_DRV_ISL1208=y
+CONFIG_RTC_DRV_ISL12022=y
+# CONFIG_RTC_DRV_ISL12057 is not set
+CONFIG_RTC_DRV_X1205=y
+# CONFIG_RTC_DRV_PCF2127 is not set
+# CONFIG_RTC_DRV_PCF8523 is not set
+CONFIG_RTC_DRV_PCF8563=y
+CONFIG_RTC_DRV_PCF8583=y
+CONFIG_RTC_DRV_M41T80=y
+# CONFIG_RTC_DRV_M41T80_WDT is not set
+CONFIG_RTC_DRV_BQ32K=y
+CONFIG_RTC_DRV_S35390A=y
+CONFIG_RTC_DRV_FM3130=y
+CONFIG_RTC_DRV_RX8581=y
+CONFIG_RTC_DRV_RX8025=y
+# CONFIG_RTC_DRV_EM3027 is not set
+# CONFIG_RTC_DRV_RV3029C2 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1343 is not set
+# CONFIG_RTC_DRV_DS1347 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+# CONFIG_RTC_DRV_RX4581 is not set
+# CONFIG_RTC_DRV_MCP795 is not set
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_DRV_DS1286=y
+CONFIG_RTC_DRV_DS1511=y
+CONFIG_RTC_DRV_DS1553=y
+CONFIG_RTC_DRV_DS1742=y
+CONFIG_RTC_DRV_STK17TA8=y
+CONFIG_RTC_DRV_M48T86=y
+CONFIG_RTC_DRV_M48T35=y
+CONFIG_RTC_DRV_M48T59=y
+CONFIG_RTC_DRV_MSM6242=y
+CONFIG_RTC_DRV_BQ4802=y
+CONFIG_RTC_DRV_RP5C01=y
+CONFIG_RTC_DRV_V3020=y
+# CONFIG_RTC_DRV_DS2404 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_MOXART is not set
+# CONFIG_RTC_DRV_XGENE is not set
+
+#
+# HID Sensor RTC drivers
+#
+# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+CONFIG_UIO=y
+# CONFIG_UIO_CIF is not set
+# CONFIG_UIO_PDRV_GENIRQ is not set
+# CONFIG_UIO_DMEM_GENIRQ is not set
+# CONFIG_UIO_AEC is not set
+# CONFIG_UIO_SERCOS3 is not set
+# CONFIG_UIO_PCI_GENERIC is not set
+# CONFIG_UIO_NETX is not set
+# CONFIG_UIO_MF624 is not set
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VIRTIO=y
+
+#
+# Virtio drivers
+#
+CONFIG_VIRTIO_PCI=y
+# CONFIG_VIRTIO_BALLOON is not set
+# CONFIG_VIRTIO_MMIO is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+# CONFIG_STAGING is not set
+CONFIG_X86_PLATFORM_DEVICES=y
+# CONFIG_ACERHDF is not set
+# CONFIG_ASUS_LAPTOP is not set
+# CONFIG_DELL_SMO8800 is not set
+# CONFIG_FUJITSU_TABLET is not set
+# CONFIG_HP_ACCEL is not set
+# CONFIG_HP_WIRELESS is not set
+# CONFIG_THINKPAD_ACPI is not set
+# CONFIG_SENSORS_HDAPS is not set
+# CONFIG_INTEL_MENLOW is not set
+# CONFIG_EEEPC_LAPTOP is not set
+# CONFIG_ACPI_WMI is not set
+# CONFIG_TOPSTAR_LAPTOP is not set
+# CONFIG_TOSHIBA_BT_RFKILL is not set
+# CONFIG_ACPI_CMPC is not set
+# CONFIG_INTEL_IPS is not set
+# CONFIG_IBM_RTL is not set
+# CONFIG_SAMSUNG_Q10 is not set
+# CONFIG_INTEL_RST is not set
+# CONFIG_INTEL_SMARTCONNECT is not set
+# CONFIG_PVPANIC is not set
+# CONFIG_CHROME_PLATFORMS is not set
+
+#
+# SOC (System On Chip) specific Drivers
+#
+
+#
+# Hardware Spinlock drivers
+#
+CONFIG_CLKEVT_I8253=y
+CONFIG_I8253_LOCK=y
+CONFIG_CLKBLD_I8253=y
+# CONFIG_SH_TIMER_CMT is not set
+# CONFIG_SH_TIMER_MTU2 is not set
+# CONFIG_SH_TIMER_TMU is not set
+# CONFIG_EM_TIMER_STI is not set
+# CONFIG_MAILBOX is not set
+CONFIG_IOMMU_SUPPORT=y
+# CONFIG_AMD_IOMMU is not set
+# CONFIG_INTEL_IOMMU is not set
+# CONFIG_IRQ_REMAP is not set
+
+#
+# Remoteproc drivers
+#
+# CONFIG_STE_MODEM_RPROC is not set
+
+#
+# Rpmsg drivers
+#
+# CONFIG_PM_DEVFREQ is not set
+# CONFIG_EXTCON is not set
+# CONFIG_MEMORY is not set
+# CONFIG_IIO is not set
+# CONFIG_NTB is not set
+# CONFIG_VME_BUS is not set
+# CONFIG_PWM is not set
+# CONFIG_IPACK_BUS is not set
+# CONFIG_RESET_CONTROLLER is not set
+# CONFIG_FMC is not set
+
+#
+# PHY Subsystem
+#
+CONFIG_GENERIC_PHY=y
+# CONFIG_BCM_KONA_USB2_PHY is not set
+# CONFIG_PHY_SAMSUNG_USB2 is not set
+# CONFIG_POWERCAP is not set
+# CONFIG_MCB is not set
+# CONFIG_THUNDERBOLT is not set
+
+#
+# Firmware Drivers
+#
+CONFIG_EDD=y
+# CONFIG_EDD_OFF is not set
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_DELL_RBU=y
+CONFIG_DCDBAS=y
+CONFIG_DMIID=y
+CONFIG_DMI_SYSFS=y
+CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
+CONFIG_ISCSI_IBFT_FIND=y
+CONFIG_ISCSI_IBFT=y
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# File systems
+#
+CONFIG_DCACHE_WORD_ACCESS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_DEFAULTS_TO_ORDERED=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_BTRFS_FS=y
+CONFIG_BTRFS_FS_POSIX_ACL=y
+# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
+# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
+# CONFIG_BTRFS_DEBUG is not set
+# CONFIG_BTRFS_ASSERT is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+CONFIG_OVERLAYFS_FS=y
+
+#
+# Caches
+#
+CONFIG_FSCACHE=y
+CONFIG_FSCACHE_STATS=y
+# CONFIG_FSCACHE_HISTOGRAM is not set
+# CONFIG_FSCACHE_DEBUG is not set
+# CONFIG_FSCACHE_OBJECT_LIST is not set
+CONFIG_CACHEFILES=y
+# CONFIG_CACHEFILES_DEBUG is not set
+# CONFIG_CACHEFILES_HISTOGRAM is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_VMCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_KERNFS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_CACHE=y
+# CONFIG_SQUASHFS_FILE_DIRECT is not set
+CONFIG_SQUASHFS_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_DECOMP_MULTI is not set
+# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_ZLIB=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_PSTORE is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_EXOFS_FS is not set
+# CONFIG_F2FS_FS is not set
+CONFIG_AUFS_FS=y
+CONFIG_AUFS_BRANCH_MAX_127=y
+# CONFIG_AUFS_BRANCH_MAX_511 is not set
+# CONFIG_AUFS_BRANCH_MAX_1023 is not set
+# CONFIG_AUFS_BRANCH_MAX_32767 is not set
+CONFIG_AUFS_SBILIST=y
+# CONFIG_AUFS_HNOTIFY is not set
+# CONFIG_AUFS_EXPORT is not set
+# CONFIG_AUFS_FHSM is not set
+# CONFIG_AUFS_RDU is not set
+# CONFIG_AUFS_SHWH is not set
+# CONFIG_AUFS_BR_RAMFS is not set
+CONFIG_AUFS_BDEV_LOOP=y
+# CONFIG_AUFS_DEBUG is not set
+CONFIG_ORE=y
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V2=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_SWAP is not set
+CONFIG_NFS_V4_1=y
+# CONFIG_NFS_V4_2 is not set
+CONFIG_PNFS_FILE_LAYOUT=y
+CONFIG_PNFS_BLOCK=y
+CONFIG_PNFS_OBJLAYOUT=y
+CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
+# CONFIG_NFS_V4_1_MIGRATION is not set
+# CONFIG_NFS_FSCACHE is not set
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V2_ACL=y
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+# CONFIG_NFSD_FAULT_INJECTION is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_SUNRPC_BACKCHANNEL=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_SUNRPC_DEBUG is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_MAC_ROMAN is not set
+# CONFIG_NLS_MAC_CELTIC is not set
+# CONFIG_NLS_MAC_CENTEURO is not set
+# CONFIG_NLS_MAC_CROATIAN is not set
+# CONFIG_NLS_MAC_CYRILLIC is not set
+# CONFIG_NLS_MAC_GAELIC is not set
+# CONFIG_NLS_MAC_GREEK is not set
+# CONFIG_NLS_MAC_ICELAND is not set
+# CONFIG_NLS_MAC_INUIT is not set
+# CONFIG_NLS_MAC_ROMANIAN is not set
+# CONFIG_NLS_MAC_TURKISH is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+
+#
+# printk and dmesg options
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+CONFIG_BOOT_PRINTK_DELAY=y
+# CONFIG_DYNAMIC_DEBUG is not set
+
+#
+# Compile-time checks and compiler options
+#
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_INFO_REDUCED is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
+CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_READABLE_ASM is not set
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
+CONFIG_DEBUG_KERNEL=y
+
+#
+# Memory Debugging
+#
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VIRTUAL is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+CONFIG_HAVE_ARCH_KMEMCHECK=y
+# CONFIG_DEBUG_SHIRQ is not set
+
+#
+# Debug Lockups and Hangs
+#
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+CONFIG_PANIC_TIMEOUT=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+CONFIG_TIMER_STATS=y
+
+#
+# Lock Debugging (spinlocks, mutexes, etc...)
+#
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_LOCK_TORTURE_TEST is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_PI_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+
+#
+# RCU Debugging
+#
+# CONFIG_SPARSE_RCU_POINTER is not set
+# CONFIG_TORTURE_TEST is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_CPU_STALL_INFO is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y
+# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_FENTRY=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACE_CLOCK=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_FTRACE_SYSCALLS is not set
+# CONFIG_TRACER_SNAPSHOT is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_UPROBE_EVENT is not set
+# CONFIG_PROBE_EVENTS is not set
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_MMIOTRACE is not set
+# CONFIG_TRACEPOINT_BENCHMARK is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_RING_BUFFER_STARTUP_TEST is not set
+
+#
+# Runtime Testing
+#
+# CONFIG_LKDTM is not set
+# CONFIG_TEST_LIST_SORT is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_RBTREE_TEST is not set
+# CONFIG_INTERVAL_TREE_TEST is not set
+# CONFIG_PERCPU_TEST is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_TEST_STRING_HELPERS is not set
+# CONFIG_TEST_KSTRTOX is not set
+# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_TEST_MODULE is not set
+# CONFIG_TEST_USER_COPY is not set
+# CONFIG_TEST_BPF is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_STRICT_DEVMEM=y
+CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_EARLY_PRINTK=y
+# CONFIG_EARLY_PRINTK_DBGP is not set
+# CONFIG_X86_PTDUMP is not set
+CONFIG_DEBUG_RODATA=y
+# CONFIG_DEBUG_RODATA_TEST is not set
+# CONFIG_DEBUG_SET_MODULE_RONX is not set
+# CONFIG_DEBUG_NX_TEST is not set
+CONFIG_DOUBLEFAULT=y
+# CONFIG_DEBUG_TLBFLUSH is not set
+# CONFIG_IOMMU_DEBUG is not set
+# CONFIG_IOMMU_STRESS is not set
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
+CONFIG_IO_DELAY_TYPE_0X80=0
+CONFIG_IO_DELAY_TYPE_0XED=1
+CONFIG_IO_DELAY_TYPE_UDELAY=2
+CONFIG_IO_DELAY_TYPE_NONE=3
+CONFIG_IO_DELAY_0X80=y
+# CONFIG_IO_DELAY_0XED is not set
+# CONFIG_IO_DELAY_UDELAY is not set
+# CONFIG_IO_DELAY_NONE is not set
+CONFIG_DEFAULT_IO_DELAY_TYPE=0
+# CONFIG_DEBUG_BOOT_PARAMS is not set
+# CONFIG_CPA_DEBUG is not set
+CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_DEBUG_NMI_SELFTEST is not set
+# CONFIG_X86_DEBUG_STATIC_CPU_HAS is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_PERSISTENT_KEYRINGS is not set
+# CONFIG_BIG_KEYS is not set
+# CONFIG_ENCRYPTED_KEYS is not set
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_XOR_BLOCKS=y
+CONFIG_ASYNC_CORE=y
+CONFIG_ASYNC_XOR=y
+CONFIG_ASYNC_PQ=y
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_USER is not set
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_GF128MUL=y
+CONFIG_CRYPTO_NULL=y
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+CONFIG_CRYPTO_ABLK_HELPER=y
+CONFIG_CRYPTO_GLUE_HELPER_X86=y
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_SEQIV=y
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=y
+CONFIG_CRYPTO_CTS=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=y
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_XTS=y
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_CMAC is not set
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_VMAC=y
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+CONFIG_CRYPTO_CRC32C_INTEL=y
+# CONFIG_CRYPTO_CRC32 is not set
+# CONFIG_CRYPTO_CRC32_PCLMUL is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set
+CONFIG_CRYPTO_GHASH=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRYPTO_RMD128=y
+CONFIG_CRYPTO_RMD160=y
+CONFIG_CRYPTO_RMD256=y
+CONFIG_CRYPTO_RMD320=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA1_SSSE3=y
+# CONFIG_CRYPTO_SHA256_SSSE3 is not set
+# CONFIG_CRYPTO_SHA512_SSSE3 is not set
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_TGR192=y
+CONFIG_CRYPTO_WP512=y
+CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=y
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_X86_64=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
+CONFIG_CRYPTO_ANUBIS=y
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=y
+CONFIG_CRYPTO_BLOWFISH_COMMON=y
+CONFIG_CRYPTO_BLOWFISH_X86_64=y
+CONFIG_CRYPTO_CAMELLIA=y
+# CONFIG_CRYPTO_CAMELLIA_X86_64 is not set
+# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 is not set
+# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 is not set
+CONFIG_CRYPTO_CAST_COMMON=y
+CONFIG_CRYPTO_CAST5=y
+# CONFIG_CRYPTO_CAST5_AVX_X86_64 is not set
+CONFIG_CRYPTO_CAST6=y
+# CONFIG_CRYPTO_CAST6_AVX_X86_64 is not set
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_FCRYPT=y
+CONFIG_CRYPTO_KHAZAD=y
+CONFIG_CRYPTO_SALSA20=y
+CONFIG_CRYPTO_SALSA20_X86_64=y
+CONFIG_CRYPTO_SEED=y
+CONFIG_CRYPTO_SERPENT=y
+# CONFIG_CRYPTO_SERPENT_SSE2_X86_64 is not set
+# CONFIG_CRYPTO_SERPENT_AVX_X86_64 is not set
+# CONFIG_CRYPTO_SERPENT_AVX2_X86_64 is not set
+CONFIG_CRYPTO_TEA=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_TWOFISH_COMMON=y
+CONFIG_CRYPTO_TWOFISH_X86_64=y
+CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=y
+# CONFIG_CRYPTO_TWOFISH_AVX_X86_64 is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=y
+# CONFIG_CRYPTO_LZ4 is not set
+# CONFIG_CRYPTO_LZ4HC is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_USER_API=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_DEV_PADLOCK=y
+CONFIG_CRYPTO_DEV_PADLOCK_AES=y
+CONFIG_CRYPTO_DEV_PADLOCK_SHA=y
+# CONFIG_CRYPTO_DEV_CCP is not set
+# CONFIG_ASYMMETRIC_KEY_TYPE is not set
+CONFIG_HAVE_KVM=y
+# CONFIG_VIRTUALIZATION is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_RAID6_PQ=y
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_NET_UTILS=y
+CONFIG_GENERIC_FIND_FIRST_BIT=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_IO=y
+CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=y
+CONFIG_CRC8=y
+# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set
+# CONFIG_RANDOM32_SELFTEST is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_XZ=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_KMP=y
+CONFIG_TEXTSEARCH_BM=y
+CONFIG_TEXTSEARCH_FSM=y
+CONFIG_ASSOCIATIVE_ARRAY=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HAS_DMA=y
+CONFIG_CHECK_SIGNATURE=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
+CONFIG_AVERAGE=y
+CONFIG_CORDIC=y
+# CONFIG_DDR is not set
+CONFIG_OID_REGISTRY=y
diff --git a/packages/base/any/kernels/3.16-lts/kconfig.mk b/packages/base/any/kernels/3.16-lts/kconfig.mk
new file mode 100644
index 00000000..ddd9b87e
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/kconfig.mk
@@ -0,0 +1,30 @@
+############################################################
+#
+#
+# Copyright 2015 Big Switch Networks, Inc.
+#
+# Licensed under the Eclipse Public License, Version 1.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.eclipse.org/legal/epl-v10.html
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+# either express or implied. See the License for the specific
+# language governing permissions and limitations under the
+# License.
+#
+#
+############################################################
+#
+# 3.16 Kernel Builds
+#
+############################################################
+THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST))))
+K_MAJOR_VERSION := 3
+K_PATCH_LEVEL := 16
+K_SUB_LEVEL := 39
+K_SUFFIX :=
+K_PATCH_DIR := $(THIS_DIR)/patches
diff --git a/packages/base/any/kernels/3.16-lts/patches/changelog.patch b/packages/base/any/kernels/3.16-lts/patches/changelog.patch
new file mode 100644
index 00000000..99ef7c6f
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/patches/changelog.patch
@@ -0,0 +1,18 @@
+--- debian/changelog 2015-08-04 00:50:04.000000000 +0000
++++ changelog 2015-12-20 04:20:25.032779900 +0000
+@@ -1,3 +1,15 @@
++linux (3.16.7-ckt11-2+acs8u2) acs; urgency=high
++
++ * add driver patches for MLNX SN2700
++
++ -- Guohan Lu Sun, 19 Dec 2015 01:50:04 +0100
++
++linux (3.16.7-ckt11-2+acs8u1) acs; urgency=high
++
++ * add support for S6000
++
++ -- Shuotian Cheng Sun, 19 Dec 2015 01:50:04 +0100
++
+ linux (3.16.7-ckt11-1+deb8u3) jessie-security; urgency=high
+
+ * path_openat(): fix double fput() (CVE-2015-5706)
diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-arista-piix4-mux-patch.patch b/packages/base/any/kernels/3.16-lts/patches/driver-arista-piix4-mux-patch.patch
new file mode 100644
index 00000000..040d6b88
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/patches/driver-arista-piix4-mux-patch.patch
@@ -0,0 +1,146 @@
+From f75a16bc0dfc83cf3df1db7ede4d7357e7be5952 Mon Sep 17 00:00:00 2001
+From: Chulei Wu
+Date: Wed, 2 Mar 2016 04:09:53 +0000
+Subject: [PATCH] arista piix4 mux patch
+
+---
+ drivers/i2c/busses/i2c-piix4.c | 63 +++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 56 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
+index a6f54ba..eafc035 100644
+--- a/drivers/i2c/busses/i2c-piix4.c
++++ b/drivers/i2c/busses/i2c-piix4.c
+@@ -128,6 +128,7 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
+
+ struct i2c_piix4_adapdata {
+ unsigned short smba;
++ int mux;
+ };
+
+ static int piix4_setup(struct pci_dev *PIIX4_dev,
+@@ -528,6 +529,43 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
+ return 0;
+ }
+
++static s32 piix4_access_mux(struct i2c_adapter * adap, u16 addr,
++ unsigned short flags, char read_write,
++ u8 command, int size, union i2c_smbus_data * data)
++{
++ static DEFINE_MUTEX(mux_mutex);
++ struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
++ int piix4_mux = adapdata->mux;
++ static int last_mux = -1;
++ s32 ret;
++ unsigned short smba_idx = 0xcd6;
++ u8 smb_en = 0x2c;
++ u8 val;
++
++ if ( piix4_mux == -1 ) {
++ return piix4_access(adap, addr, flags, read_write, command, size, data);
++ }
++
++ mutex_lock(&mux_mutex);
++
++ if ( last_mux != piix4_mux ) {
++ /* Select the correct bus mux*/
++ outb_p(smb_en, smba_idx);
++ val = inb_p(smba_idx + 1);
++ val = (val & 0xf9) | (piix4_mux << 1);
++ outb_p(val, smba_idx + 1);
++
++ last_mux = piix4_mux;
++ dev_dbg(&adap->dev, "set mux to 0x%02x\n", piix4_mux);
++ }
++
++ ret = piix4_access(adap, addr, flags, read_write, command, size, data);
++
++ mutex_unlock(&mux_mutex);
++
++ return ret;
++}
++
+ static u32 piix4_func(struct i2c_adapter *adapter)
+ {
+ return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
+@@ -536,7 +574,7 @@ static u32 piix4_func(struct i2c_adapter *adapter)
+ }
+
+ static const struct i2c_algorithm smbus_algorithm = {
+- .smbus_xfer = piix4_access,
++ .smbus_xfer = piix4_access_mux,
+ .functionality = piix4_func,
+ };
+
+@@ -569,7 +607,7 @@ static struct i2c_adapter *piix4_main_adapter;
+ static struct i2c_adapter *piix4_aux_adapter;
+
+ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
+- struct i2c_adapter **padap)
++ struct i2c_adapter **padap, int mux)
+ {
+ struct i2c_adapter *adap;
+ struct i2c_piix4_adapdata *adapdata;
+@@ -593,6 +631,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
+ }
+
+ adapdata->smba = smba;
++ adapdata->mux = mux;
+
+ /* set up the sysfs linkage to our parent device */
+ adap->dev.parent = &dev->dev;
+@@ -618,6 +657,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
+ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
+ int retval;
++ int mux = -1;
++ int aux_smba;
+
+ if ((dev->vendor == PCI_VENDOR_ID_ATI &&
+ dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
+@@ -633,7 +674,14 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ return retval;
+
+ /* Try to register main SMBus adapter, give up if we can't */
+- retval = piix4_add_adapter(dev, retval, &piix4_main_adapter);
++ aux_smba = retval;
++ if (dev->vendor == PCI_VENDOR_ID_AMD &&
++ dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) {
++ mux = -1;
++ } else {
++ mux = 0;
++ }
++ retval = piix4_add_adapter(dev, retval, &piix4_main_adapter, mux);
+ if (retval < 0)
+ return retval;
+
+@@ -644,21 +692,22 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS) {
+ if (dev->revision < 0x40) {
+ retval = piix4_setup_aux(dev, id, 0x58);
++ mux = -1;
+ } else {
+- /* SB800 added aux bus too */
+- retval = piix4_setup_sb800(dev, id, 1);
++ retval = aux_smba;
++ mux = 1;
+ }
+ }
+
+ if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) {
+ retval = piix4_setup_sb800(dev, id, 1);
++ mux = -1;
+ }
+-
+ if (retval > 0) {
+ /* Try to add the aux adapter if it exists,
+ * piix4_add_adapter will clean up if this fails */
+- piix4_add_adapter(dev, retval, &piix4_aux_adapter);
++ piix4_add_adapter(dev, retval, &piix4_aux_adapter, mux);
+ }
+
+ return 0;
+--
+2.1.4
+
diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-at24-fix-odd-length-two-byte-access.patch b/packages/base/any/kernels/3.16-lts/patches/driver-at24-fix-odd-length-two-byte-access.patch
new file mode 100644
index 00000000..6060b15e
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/patches/driver-at24-fix-odd-length-two-byte-access.patch
@@ -0,0 +1,34 @@
+--- a/drivers/misc/eeprom/at24.c 2016-10-06 12:45:49.290365545 +0000
++++ b/drivers/misc/eeprom/at24.c 2016-10-06 12:47:08.630368526 +0000
+@@ -84,9 +84,9 @@
+ *
+ * This value is forced to be a power of two so that writes align on pages.
+ */
+-static unsigned io_limit = 128;
++static unsigned io_limit = 32;
+ module_param(io_limit, uint, 0);
+-MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 128)");
++MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 32)");
+
+ /*
+ * Specs often allow 5 msec for a page write, sometimes 20 msec;
+@@ -192,7 +192,8 @@
+ count = I2C_SMBUS_BLOCK_MAX;
+ break;
+ case I2C_SMBUS_WORD_DATA:
+- count = 2;
++ /* Check for odd length transaction */
++ count = (count == 1) ? 1 : 2;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ count = 1;
+@@ -237,7 +238,8 @@
+ status = i2c_smbus_read_word_data(client, offset);
+ if (status >= 0) {
+ buf[0] = status & 0xff;
+- buf[1] = status >> 8;
++ if (count == 2)
++ buf[1] = status >> 8;
+ status = count;
+ }
+ break;
diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620-fix-rpm-calc.patch b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620-fix-rpm-calc.patch
new file mode 100644
index 00000000..e5401626
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620-fix-rpm-calc.patch
@@ -0,0 +1,196 @@
+MAX6620 fix rpm calculation accuracy
+
+From: Cumulus Networks
+
+The driver only fills the most significant 8 bits of the fan tach
+count (11 bit value). Fixing the driver to use all of 11 bits for
+more accuracy.
+---
+ drivers/hwmon/max6620.c | 105 +++++++++++++++++++++--------------------------
+ 1 file changed, 46 insertions(+), 59 deletions(-)
+
+diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c
+index 3c337c7..76c1f7f 100644
+--- a/drivers/hwmon/max6620.c
++++ b/drivers/hwmon/max6620.c
+@@ -46,6 +46,8 @@
+
+ /* clock: The clock frequency of the chip the driver should assume */
+ static int clock = 8192;
++static u32 sr = 2;
++static u32 np = 2;
+
+ module_param(clock, int, S_IRUGO);
+
+@@ -213,22 +215,22 @@ static ssize_t get_fan(struct device *dev, struct device_attribute *devattr, cha
+
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct max6620_data *data = max6620_update_device(dev);
+- int rpm;
+-
+- /*
+- * Calculation details:
+- *
+- * Each tachometer counts over an interval given by the "count"
+- * register (0.25, 0.5, 1 or 2 seconds). This module assumes
+- * that the fans produce two pulses per revolution (this seems
+- * to be the most common).
+- */
+- if(data->tach[attr->index] == 0 || data->tach[attr->index] == 255) {
++ struct i2c_client *client = to_i2c_client(dev);
++ u32 rpm = 0;
++ u32 tach = 0;
++ u32 tach1 = 0;
++ u32 tach2 = 0;
++
++ tach1 = i2c_smbus_read_byte_data(client, tach_reg[attr->index]);
++ tach1 = (tach1 << 3) & 0x7f8;
++ tach2 = i2c_smbus_read_byte_data(client, tach_reg[attr->index] + 1);
++ tach2 = (tach2 >> 5) & 0x7;
++ tach = tach1 | tach2;
++ if (tach == 0) {
+ rpm = 0;
+ } else {
+- rpm = ((clock / (data->tach[attr->index] << 3)) * 30 * DIV_FROM_REG(data->fandyn[attr->index]));
++ rpm = (60 * sr * clock)/(tach * np);
+ }
+-
+ return sprintf(buf, "%d\n", rpm);
+ }
+
+@@ -236,22 +238,21 @@ static ssize_t get_target(struct device *dev, struct device_attribute *devattr,
+
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct max6620_data *data = max6620_update_device(dev);
+- int kscale, ktach, rpm;
+-
+- /*
+- * Use the datasheet equation:
+- *
+- * FanSpeed = KSCALE x fCLK / [256 x (KTACH + 1)]
+- *
+- * then multiply by 60 to give rpm.
+- */
+-
+- kscale = DIV_FROM_REG(data->fandyn[attr->index]);
+- ktach = data->target[attr->index];
+- if(ktach == 0) {
++ struct i2c_client *client = to_i2c_client(dev);
++ u32 rpm;
++ u32 target;
++ u32 target1;
++ u32 target2;
++
++ target1 = i2c_smbus_read_byte_data(client, target_reg[attr->index]);
++ target1 = (target1 << 3) & 0x7f8;
++ target2 = i2c_smbus_read_byte_data(client, target_reg[attr->index] + 1);
++ target2 = (target2 >> 5) & 0x7;
++ target = target1 | target2;
++ if (target == 0) {
+ rpm = 0;
+ } else {
+- rpm = ((60 * kscale * clock) / (ktach << 3));
++ rpm = (60 * sr * clock)/(target * np);
+ }
+ return sprintf(buf, "%d\n", rpm);
+ }
+@@ -261,9 +262,11 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
+ struct i2c_client *client = to_i2c_client(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct max6620_data *data = i2c_get_clientdata(client);
+- int kscale, ktach;
+- unsigned long rpm;
++ u32 rpm;
+ int err;
++ u32 target;
++ u32 target1;
++ u32 target2;
+
+ err = kstrtoul(buf, 10, &rpm);
+ if (err)
+@@ -271,25 +274,13 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
+
+ rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
+
+- /*
+- * Divide the required speed by 60 to get from rpm to rps, then
+- * use the datasheet equation:
+- *
+- * KTACH = [(fCLK x KSCALE) / (256 x FanSpeed)] - 1
+- */
+-
+ mutex_lock(&data->update_lock);
+
+- kscale = DIV_FROM_REG(data->fandyn[attr->index]);
+- ktach = ((60 * kscale * clock) / rpm);
+- if (ktach < 0)
+- ktach = 0;
+- if (ktach > 255)
+- ktach = 255;
+- data->target[attr->index] = ktach;
+-
+- i2c_smbus_write_byte_data(client, target_reg[attr->index], data->target[attr->index]);
+- i2c_smbus_write_byte_data(client, target_reg[attr->index]+0x01, 0x00);
++ target = (60 * sr * 8192)/(rpm * np);
++ target1 = (target >> 3) & 0xff;
++ target2 = (target << 5) & 0xe0;
++ i2c_smbus_write_byte_data(client, target_reg[attr->index], target1);
++ i2c_smbus_write_byte_data(client, target_reg[attr->index] + 1, target2);
+
+ mutex_unlock(&data->update_lock);
+
+@@ -609,8 +600,11 @@ static int max6620_init_client(struct i2c_client *client) {
+ }
+
+
+-
+- if (i2c_smbus_write_byte_data(client, MAX6620_REG_CONFIG, config)) {
++ /*
++ * Set bit 4, disable other fans from going full speed on a fail
++ * failure.
++ */
++ if (i2c_smbus_write_byte_data(client, MAX6620_REG_CONFIG, config | 0x10)) {
+ dev_err(&client->dev, "Config write error, aborting.\n");
+ return err;
+ }
+@@ -618,28 +612,21 @@ static int max6620_init_client(struct i2c_client *client) {
+ data->config = config;
+ for (i = 0; i < 4; i++) {
+ data->fancfg[i] = i2c_smbus_read_byte_data(client, config_reg[i]);
+- data->fancfg[i] |= 0x80; // enable TACH monitoring
++ data->fancfg[i] |= 0xa8; // enable TACH monitoring
+ i2c_smbus_write_byte_data(client, config_reg[i], data->fancfg[i]);
+ data->fandyn[i] = i2c_smbus_read_byte_data(client, dyn_reg[i]);
+- data-> fandyn[i] |= 0x1C;
++ /* 2 counts (001) and Rate change 100 (0.125 secs) */
++ data-> fandyn[i] = 0x30;
+ i2c_smbus_write_byte_data(client, dyn_reg[i], data->fandyn[i]);
+ data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]);
+ data->volt[i] = i2c_smbus_read_byte_data(client, volt_reg[i]);
+ data->target[i] = i2c_smbus_read_byte_data(client, target_reg[i]);
+ data->dac[i] = i2c_smbus_read_byte_data(client, dac_reg[i]);
+
+-
+-
+ }
+-
+-
+-
+ return 0;
+ }
+
+-
+-
+-
+ static struct max6620_data *max6620_update_device(struct device *dev)
+ {
+ int i;
+@@ -678,7 +665,7 @@ static struct max6620_data *max6620_update_device(struct device *dev)
+ return data;
+ }
+
+-module_i2c_driver(max6620_driver);
++// module_i2c_driver(max6620_driver);
+
+ static int __init max6620_init(void)
+ {
diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620-update.patch b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620-update.patch
new file mode 100644
index 00000000..b4cfe0cf
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620-update.patch
@@ -0,0 +1,113 @@
+Update MAX6620 driver to support newer kernel version
+
+From: Shuotian Cheng
+
+
+---
+ drivers/hwmon/max6620.c | 25 +++++++++++--------------
+ 1 file changed, 11 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c
+index 76c1f7f..fb49195 100644
+--- a/drivers/hwmon/max6620.c
++++ b/drivers/hwmon/max6620.c
+@@ -183,7 +183,7 @@ static struct i2c_driver max6620_driver = {
+ .name = "max6620",
+ },
+ .probe = max6620_probe,
+- .remove = __devexit_p(max6620_remove),
++ .remove = max6620_remove,
+ .id_table = max6620_id,
+ .address_list = normal_i2c,
+ };
+@@ -231,6 +231,7 @@ static ssize_t get_fan(struct device *dev, struct device_attribute *devattr, cha
+ } else {
+ rpm = (60 * sr * clock)/(tach * np);
+ }
++
+ return sprintf(buf, "%d\n", rpm);
+ }
+
+@@ -262,17 +263,17 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
+ struct i2c_client *client = to_i2c_client(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct max6620_data *data = i2c_get_clientdata(client);
+- u32 rpm;
++ unsigned long rpm;
+ int err;
+- u32 target;
+- u32 target1;
+- u32 target2;
++ unsigned long target;
++ unsigned long target1;
++ unsigned long target2;
+
+ err = kstrtoul(buf, 10, &rpm);
+ if (err)
+ return err;
+
+- rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
++ rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
+
+ mutex_lock(&data->update_lock);
+
+@@ -326,7 +327,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr, con
+ if (err)
+ return err;
+
+- pwm = SENSORS_LIMIT(pwm, 0, 255);
++ pwm = clamp_val(pwm, 0, 255);
+
+ mutex_lock(&data->update_lock);
+
+@@ -534,7 +535,7 @@ static struct attribute_group max6620_attr_grp = {
+ * Real code
+ */
+
+-static int __devinit max6620_probe(struct i2c_client *client, const struct i2c_device_id *id) {
++static int max6620_probe(struct i2c_client *client, const struct i2c_device_id *id) {
+
+ struct max6620_data *data;
+ int err;
+@@ -575,7 +576,7 @@ dev_info(&client->dev, "Sysfs entries created\n");
+ return err;
+ }
+
+-static int __devexit max6620_remove(struct i2c_client *client) {
++static int max6620_remove(struct i2c_client *client) {
+
+ struct max6620_data *data = i2c_get_clientdata(client);
+
+@@ -599,7 +600,6 @@ static int max6620_init_client(struct i2c_client *client) {
+ return err;
+ }
+
+-
+ /*
+ * Set bit 4, disable other fans from going full speed on a fail
+ * failure.
+@@ -615,14 +615,13 @@ static int max6620_init_client(struct i2c_client *client) {
+ data->fancfg[i] |= 0xa8; // enable TACH monitoring
+ i2c_smbus_write_byte_data(client, config_reg[i], data->fancfg[i]);
+ data->fandyn[i] = i2c_smbus_read_byte_data(client, dyn_reg[i]);
+- /* 2 counts (001) and Rate change 100 (0.125 secs) */
++ /* 2 counts (001) and Rate change 100 (0.125 secs) */
+ data-> fandyn[i] = 0x30;
+ i2c_smbus_write_byte_data(client, dyn_reg[i], data->fandyn[i]);
+ data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]);
+ data->volt[i] = i2c_smbus_read_byte_data(client, volt_reg[i]);
+ data->target[i] = i2c_smbus_read_byte_data(client, target_reg[i]);
+ data->dac[i] = i2c_smbus_read_byte_data(client, dac_reg[i]);
+-
+ }
+ return 0;
+ }
+@@ -665,8 +664,6 @@ static struct max6620_data *max6620_update_device(struct device *dev)
+ return data;
+ }
+
+-// module_i2c_driver(max6620_driver);
+-
+ static int __init max6620_init(void)
+ {
+ return i2c_add_driver(&max6620_driver);
diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620.patch b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620.patch
new file mode 100644
index 00000000..119c12ee
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-max6620.patch
@@ -0,0 +1,753 @@
+Driver for MAX6620 Fan sensor
+
+From: Cumulus Networks
+
+
+---
+ drivers/hwmon/Kconfig | 10 +
+ drivers/hwmon/Makefile | 1
+ drivers/hwmon/max6620.c | 702 +++++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 713 insertions(+)
+ create mode 100644 drivers/hwmon/max6620.c
+
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 02d3d85..ca38e05 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -784,6 +784,16 @@ config SENSORS_MAX6650
+ This driver can also be built as a module. If so, the module
+ will be called max6650.
+
++config SENSORS_MAX6620
++ tristate "Maxim MAX6620 sensor chip"
++ depends on I2C
++ help
++ If you say yes here you get support for the MAX6620
++ sensor chips.
++
++ This driver can also be built as a module. If so, the module
++ will be called max6620.
++
+ config SENSORS_MAX6697
+ tristate "Maxim MAX6697 and compatibles"
+ depends on I2C
+diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
+index 3dc0f02..8837a7b 100644
+--- a/drivers/hwmon/Makefile
++++ b/drivers/hwmon/Makefile
+@@ -111,6 +111,7 @@ obj-$(CONFIG_SENSORS_MAX197) += max197.o
+ obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
+ obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
+ obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
++obj-$(CONFIG_SENSORS_MAX6620) += max6620.o
+ obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
+ obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
+ obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
+diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c
+new file mode 100644
+index 0000000..3c337c7
+--- /dev/null
++++ b/drivers/hwmon/max6620.c
+@@ -0,0 +1,702 @@
++/*
++ * max6620.c - Linux Kernel module for hardware monitoring.
++ *
++ * (C) 2012 by L. Grunenberg
++ *
++ * based on code written by :
++ * 2007 by Hans J. Koch
++ * John Morris
++ * Copyright (c) 2003 Spirent Communications
++ * and Claus Gindhart
++ *
++ * This module has only been tested with the MAX6620 chip.
++ *
++ * The datasheet was last seen at:
++ *
++ * http://pdfserv.maxim-ic.com/en/ds/MAX6620.pdf
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/jiffies.h>
++#include <linux/i2c.h>
++#include <linux/hwmon.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/err.h>
++
++/*
++ * Insmod parameters
++ */
++
++
++/* clock: The clock frequency of the chip the driver should assume */
++static int clock = 8192;
++
++module_param(clock, int, S_IRUGO);
++
++static const unsigned short normal_i2c[] = {0x0a, 0x1a, 0x2a, I2C_CLIENT_END};
++
++/*
++ * MAX 6620 registers
++ */
++
++#define MAX6620_REG_CONFIG 0x00
++#define MAX6620_REG_FAULT 0x01
++#define MAX6620_REG_CONF_FAN0 0x02
++#define MAX6620_REG_CONF_FAN1 0x03
++#define MAX6620_REG_CONF_FAN2 0x04
++#define MAX6620_REG_CONF_FAN3 0x05
++#define MAX6620_REG_DYN_FAN0 0x06
++#define MAX6620_REG_DYN_FAN1 0x07
++#define MAX6620_REG_DYN_FAN2 0x08
++#define MAX6620_REG_DYN_FAN3 0x09
++#define MAX6620_REG_TACH0 0x10
++#define MAX6620_REG_TACH1 0x12
++#define MAX6620_REG_TACH2 0x14
++#define MAX6620_REG_TACH3 0x16
++#define MAX6620_REG_VOLT0 0x18
++#define MAX6620_REG_VOLT1 0x1A
++#define MAX6620_REG_VOLT2 0x1C
++#define MAX6620_REG_VOLT3 0x1E
++#define MAX6620_REG_TAR0 0x20
++#define MAX6620_REG_TAR1 0x22
++#define MAX6620_REG_TAR2 0x24
++#define MAX6620_REG_TAR3 0x26
++#define MAX6620_REG_DAC0 0x28
++#define MAX6620_REG_DAC1 0x2A
++#define MAX6620_REG_DAC2 0x2C
++#define MAX6620_REG_DAC3 0x2E
++
++/*
++ * Config register bits
++ */
++
++#define MAX6620_CFG_RUN 0x80
++#define MAX6620_CFG_POR 0x40
++#define MAX6620_CFG_TIMEOUT 0x20
++#define MAX6620_CFG_FULLFAN 0x10
++#define MAX6620_CFG_OSC 0x08
++#define MAX6620_CFG_WD_MASK 0x06
++#define MAX6620_CFG_WD_2 0x02
++#define MAX6620_CFG_WD_6 0x04
++#define MAX6620_CFG_WD10 0x06
++#define MAX6620_CFG_WD 0x01
++
++
++/*
++ * Failure status register bits
++ */
++
++#define MAX6620_FAIL_TACH0 0x10
++#define MAX6620_FAIL_TACH1 0x20
++#define MAX6620_FAIL_TACH2 0x40
++#define MAX6620_FAIL_TACH3 0x80
++#define MAX6620_FAIL_MASK0 0x01
++#define MAX6620_FAIL_MASK1 0x02
++#define MAX6620_FAIL_MASK2 0x04
++#define MAX6620_FAIL_MASK3 0x08
++
++
++/* Minimum and maximum values of the FAN-RPM */
++#define FAN_RPM_MIN 240
++#define FAN_RPM_MAX 30000
++
++#define DIV_FROM_REG(reg) (1 << ((reg & 0xE0) >> 5))
++
++static int max6620_probe(struct i2c_client *client, const struct i2c_device_id *id);
++static int max6620_init_client(struct i2c_client *client);
++static int max6620_remove(struct i2c_client *client);
++static struct max6620_data *max6620_update_device(struct device *dev);
++
++static const u8 config_reg[] = {
++ MAX6620_REG_CONF_FAN0,
++ MAX6620_REG_CONF_FAN1,
++ MAX6620_REG_CONF_FAN2,
++ MAX6620_REG_CONF_FAN3,
++};
++
++static const u8 dyn_reg[] = {
++ MAX6620_REG_DYN_FAN0,
++ MAX6620_REG_DYN_FAN1,
++ MAX6620_REG_DYN_FAN2,
++ MAX6620_REG_DYN_FAN3,
++};
++
++static const u8 tach_reg[] = {
++ MAX6620_REG_TACH0,
++ MAX6620_REG_TACH1,
++ MAX6620_REG_TACH2,
++ MAX6620_REG_TACH3,
++};
++
++static const u8 volt_reg[] = {
++ MAX6620_REG_VOLT0,
++ MAX6620_REG_VOLT1,
++ MAX6620_REG_VOLT2,
++ MAX6620_REG_VOLT3,
++};
++
++static const u8 target_reg[] = {
++ MAX6620_REG_TAR0,
++ MAX6620_REG_TAR1,
++ MAX6620_REG_TAR2,
++ MAX6620_REG_TAR3,
++};
++
++static const u8 dac_reg[] = {
++ MAX6620_REG_DAC0,
++ MAX6620_REG_DAC1,
++ MAX6620_REG_DAC2,
++ MAX6620_REG_DAC3,
++};
++
++/*
++ * Driver data (common to all clients)
++ */
++
++static const struct i2c_device_id max6620_id[] = {
++ { "max6620", 0 },
++ { }
++};
++MODULE_DEVICE_TABLE(i2c, max6620_id);
++
++static struct i2c_driver max6620_driver = {
++ .class = I2C_CLASS_HWMON,
++ .driver = {
++ .name = "max6620",
++ },
++ .probe = max6620_probe,
++ .remove = __devexit_p(max6620_remove),
++ .id_table = max6620_id,
++ .address_list = normal_i2c,
++};
++
++/*
++ * Client data (each client gets its own)
++ */
++
++struct max6620_data {
++ struct device *hwmon_dev;
++ struct mutex update_lock;
++ int nr_fans;
++ char valid; /* zero until following fields are valid */
++ unsigned long last_updated; /* in jiffies */
++
++ /* register values */
++ u8 speed[4];
++ u8 config;
++ u8 fancfg[4];
++ u8 fandyn[4];
++ u8 tach[4];
++ u8 volt[4];
++ u8 target[4];
++ u8 dac[4];
++ u8 fault;
++};
++
++static ssize_t get_fan(struct device *dev, struct device_attribute *devattr, char *buf) {
++
++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
++ struct max6620_data *data = max6620_update_device(dev);
++ int rpm;
++
++ /*
++ * Calculation details:
++ *
++ * Each tachometer counts over an interval given by the "count"
++ * register (0.25, 0.5, 1 or 2 seconds). This module assumes
++ * that the fans produce two pulses per revolution (this seems
++ * to be the most common).
++ */
++ if(data->tach[attr->index] == 0 || data->tach[attr->index] == 255) {
++ rpm = 0;
++ } else {
++ rpm = ((clock / (data->tach[attr->index] << 3)) * 30 * DIV_FROM_REG(data->fandyn[attr->index]));
++ }
++
++ return sprintf(buf, "%d\n", rpm);
++}
++
++static ssize_t get_target(struct device *dev, struct device_attribute *devattr, char *buf) {
++
++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
++ struct max6620_data *data = max6620_update_device(dev);
++ int kscale, ktach, rpm;
++
++ /*
++ * Use the datasheet equation:
++ *
++ * FanSpeed = KSCALE x fCLK / [256 x (KTACH + 1)]
++ *
++ * then multiply by 60 to give rpm.
++ */
++
++ kscale = DIV_FROM_REG(data->fandyn[attr->index]);
++ ktach = data->target[attr->index];
++ if(ktach == 0) {
++ rpm = 0;
++ } else {
++ rpm = ((60 * kscale * clock) / (ktach << 3));
++ }
++ return sprintf(buf, "%d\n", rpm);
++}
++
++static ssize_t set_target(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) {
++
++ struct i2c_client *client = to_i2c_client(dev);
++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
++ struct max6620_data *data = i2c_get_clientdata(client);
++ int kscale, ktach;
++ unsigned long rpm;
++ int err;
++
++ err = kstrtoul(buf, 10, &rpm);
++ if (err)
++ return err;
++
++ rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
++
++ /*
++ * Divide the required speed by 60 to get from rpm to rps, then
++ * use the datasheet equation:
++ *
++ * KTACH = [(fCLK x KSCALE) / (256 x FanSpeed)] - 1
++ */
++
++ mutex_lock(&data->update_lock);
++
++ kscale = DIV_FROM_REG(data->fandyn[attr->index]);
++ ktach = ((60 * kscale * clock) / rpm);
++ if (ktach < 0)
++ ktach = 0;
++ if (ktach > 255)
++ ktach = 255;
++ data->target[attr->index] = ktach;
++
++ i2c_smbus_write_byte_data(client, target_reg[attr->index], data->target[attr->index]);
++ i2c_smbus_write_byte_data(client, target_reg[attr->index]+0x01, 0x00);
++
++ mutex_unlock(&data->update_lock);
++
++ return count;
++}
++
++/*
++ * Get/set the fan speed in open loop mode using pwm1 sysfs file.
++ * Speed is given as a relative value from 0 to 255, where 255 is maximum
++ * speed. Note that this is done by writing directly to the chip's DAC,
++ * it won't change the closed loop speed set by fan1_target.
++ * Also note that due to rounding errors it is possible that you don't read
++ * back exactly the value you have set.
++ */
++
++static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr, char *buf) {
++
++ int pwm;
++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
++ struct max6620_data *data = max6620_update_device(dev);
++
++ /*
++ * Useful range for dac is 0-180 for 12V fans and 0-76 for 5V fans.
++ * Lower DAC values mean higher speeds.
++ */
++ pwm = ((int)data->volt[attr->index]);
++
++ if (pwm < 0)
++ pwm = 0;
++
++ return sprintf(buf, "%d\n", pwm);
++}
++
++static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) {
++
++ struct i2c_client *client = to_i2c_client(dev);
++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
++ struct max6620_data *data = i2c_get_clientdata(client);
++ unsigned long pwm;
++ int err;
++
++ err = kstrtoul(buf, 10, &pwm);
++ if (err)
++ return err;
++
++ pwm = SENSORS_LIMIT(pwm, 0, 255);
++
++ mutex_lock(&data->update_lock);
++
++ data->dac[attr->index] = pwm;
++
++
++ i2c_smbus_write_byte_data(client, dac_reg[attr->index], data->dac[attr->index]);
++ i2c_smbus_write_byte_data(client, dac_reg[attr->index]+1, 0x00);
++
++ mutex_unlock(&data->update_lock);
++
++ return count;
++}
++
++/*
++ * Get/Set controller mode:
++ * Possible values:
++ * 0 = Fan always on
++ * 1 = Open loop, Voltage is set according to speed, not regulated.
++ * 2 = Closed loop, RPM for all fans regulated by fan1 tachometer
++ */
++
++static ssize_t get_enable(struct device *dev, struct device_attribute *devattr, char *buf) {
++
++ struct max6620_data *data = max6620_update_device(dev);
++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
++ int mode = (data->fancfg[attr->index] & 0x80 ) >> 7;
++ int sysfs_modes[2] = {1, 2};
++
++ return sprintf(buf, "%d\n", sysfs_modes[mode]);
++}
++
++static ssize_t set_enable(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) {
++
++ struct i2c_client *client = to_i2c_client(dev);
++ struct max6620_data *data = i2c_get_clientdata(client);
++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
++ int max6620_modes[3] = {0, 1, 0};
++ unsigned long mode;
++ int err;
++
++ err = kstrtoul(buf, 10, &mode);
++ if (err)
++ return err;
++
++ if (mode > 2)
++ return -EINVAL;
++
++ mutex_lock(&data->update_lock);
++
++ data->fancfg[attr->index] = i2c_smbus_read_byte_data(client, config_reg[attr->index]);
++ data->fancfg[attr->index] = (data->fancfg[attr->index] & ~0x80)
++ | (max6620_modes[mode] << 7);
++
++ i2c_smbus_write_byte_data(client, config_reg[attr->index], data->fancfg[attr->index]);
++
++ mutex_unlock(&data->update_lock);
++
++ return count;
++}
++
++/*
++ * Read/write functions for fan1_div sysfs file. The MAX6620 has no such
++ * divider. We handle this by converting between divider and counttime:
++ *
++ * (counttime == k) <==> (divider == 2^k), k = 0, 1, 2, 3, 4 or 5
++ *
++ * Lower values of k allow to connect a faster fan without the risk of
++ * counter overflow. The price is lower resolution. You can also set counttime
++ * using the module parameter. Note that the module parameter "prescaler" also
++ * influences the behaviour. Unfortunately, there's no sysfs attribute
++ * defined for that. See the data sheet for details.
++ */
++
++static ssize_t get_div(struct device *dev, struct device_attribute *devattr, char *buf) {
++
++ struct max6620_data *data = max6620_update_device(dev);
++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
++
++ return sprintf(buf, "%d\n", DIV_FROM_REG(data->fandyn[attr->index]));
++}
++
++static ssize_t set_div(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) {
++
++ struct i2c_client *client = to_i2c_client(dev);
++ struct max6620_data *data = i2c_get_clientdata(client);
++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
++ unsigned long div;
++ int err;
++ u8 div_bin;
++
++ err = kstrtoul(buf, 10, &div);
++ if (err)
++ return err;
++
++ mutex_lock(&data->update_lock);
++ switch (div) {
++ case 1:
++ div_bin = 0;
++ break;
++ case 2:
++ div_bin = 1;
++ break;
++ case 4:
++ div_bin = 2;
++ break;
++ case 8:
++ div_bin = 3;
++ break;
++ case 16:
++ div_bin = 4;
++ break;
++ case 32:
++ div_bin = 5;
++ break;
++ default:
++ mutex_unlock(&data->update_lock);
++ return -EINVAL;
++ }
++ data->fandyn[attr->index] &= 0x1F;
++ data->fandyn[attr->index] |= div_bin << 5;
++ i2c_smbus_write_byte_data(client, dyn_reg[attr->index], data->fandyn[attr->index]);
++ mutex_unlock(&data->update_lock);
++
++ return count;
++}
++
++/*
++ * Get alarm stati:
++ * Possible values:
++ * 0 = no alarm
++ * 1 = alarm
++ */
++
++static ssize_t get_alarm(struct device *dev, struct device_attribute *devattr, char *buf) {
++
++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
++ struct max6620_data *data = max6620_update_device(dev);
++ struct i2c_client *client = to_i2c_client(dev);
++ int alarm = 0;
++
++ if (data->fault & (1 << attr->index)) {
++ mutex_lock(&data->update_lock);
++ alarm = 1;
++ data->fault &= ~(1 << attr->index);
++ data->fault |= i2c_smbus_read_byte_data(client,
++ MAX6620_REG_FAULT);
++ mutex_unlock(&data->update_lock);
++ }
++
++ return sprintf(buf, "%d\n", alarm);
++}
++
++static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0);
++static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1);
++static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2);
++static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, get_fan, NULL, 3);
++static SENSOR_DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target, set_target, 0);
++static SENSOR_DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_div, set_div, 0);
++// static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 0);
++static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 0);
++static SENSOR_DEVICE_ATTR(fan2_target, S_IWUSR | S_IRUGO, get_target, set_target, 1);
++static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR | S_IRUGO, get_div, set_div, 1);
++// static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 1);
++static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 1);
++static SENSOR_DEVICE_ATTR(fan3_target, S_IWUSR | S_IRUGO, get_target, set_target, 2);
++static SENSOR_DEVICE_ATTR(fan3_div, S_IWUSR | S_IRUGO, get_div, set_div, 2);
++// static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 2);
++static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 2);
++static SENSOR_DEVICE_ATTR(fan4_target, S_IWUSR | S_IRUGO, get_target, set_target, 3);
++static SENSOR_DEVICE_ATTR(fan4_div, S_IWUSR | S_IRUGO, get_div, set_div, 3);
++// static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 3);
++static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 3);
++
++static struct attribute *max6620_attrs[] = {
++ &sensor_dev_attr_fan1_input.dev_attr.attr,
++ &sensor_dev_attr_fan2_input.dev_attr.attr,
++ &sensor_dev_attr_fan3_input.dev_attr.attr,
++ &sensor_dev_attr_fan4_input.dev_attr.attr,
++ &sensor_dev_attr_fan1_target.dev_attr.attr,
++ &sensor_dev_attr_fan1_div.dev_attr.attr,
++// &sensor_dev_attr_pwm1_enable.dev_attr.attr,
++ &sensor_dev_attr_pwm1.dev_attr.attr,
++ &sensor_dev_attr_fan2_target.dev_attr.attr,
++ &sensor_dev_attr_fan2_div.dev_attr.attr,
++// &sensor_dev_attr_pwm2_enable.dev_attr.attr,
++ &sensor_dev_attr_pwm2.dev_attr.attr,
++ &sensor_dev_attr_fan3_target.dev_attr.attr,
++ &sensor_dev_attr_fan3_div.dev_attr.attr,
++// &sensor_dev_attr_pwm3_enable.dev_attr.attr,
++ &sensor_dev_attr_pwm3.dev_attr.attr,
++ &sensor_dev_attr_fan4_target.dev_attr.attr,
++ &sensor_dev_attr_fan4_div.dev_attr.attr,
++// &sensor_dev_attr_pwm4_enable.dev_attr.attr,
++ &sensor_dev_attr_pwm4.dev_attr.attr,
++ NULL
++};
++
++static struct attribute_group max6620_attr_grp = {
++ .attrs = max6620_attrs,
++};
++
++
++/*
++ * Real code
++ */
++
++static int __devinit max6620_probe(struct i2c_client *client, const struct i2c_device_id *id) {
++
++ struct max6620_data *data;
++ int err;
++
++ data = devm_kzalloc(&client->dev, sizeof(struct max6620_data), GFP_KERNEL);
++ if (!data) {
++ dev_err(&client->dev, "out of memory.\n");
++ return -ENOMEM;
++ }
++
++ i2c_set_clientdata(client, data);
++ mutex_init(&data->update_lock);
++ data->nr_fans = id->driver_data;
++
++ /*
++ * Initialize the max6620 chip
++ */
++ dev_info(&client->dev, "About to initialize module\n");
++
++ err = max6620_init_client(client);
++ if (err)
++ return err;
++ dev_info(&client->dev, "Module initialized\n");
++
++ err = sysfs_create_group(&client->dev.kobj, &max6620_attr_grp);
++ if (err)
++ return err;
++dev_info(&client->dev, "Sysfs entries created\n");
++
++ data->hwmon_dev = hwmon_device_register(&client->dev);
++ if (!IS_ERR(data->hwmon_dev))
++ return 0;
++
++ err = PTR_ERR(data->hwmon_dev);
++ dev_err(&client->dev, "error registering hwmon device.\n");
++
++ sysfs_remove_group(&client->dev.kobj, &max6620_attr_grp);
++ return err;
++}
++
++static int __devexit max6620_remove(struct i2c_client *client) {
++
++ struct max6620_data *data = i2c_get_clientdata(client);
++
++ hwmon_device_unregister(data->hwmon_dev);
++
++ sysfs_remove_group(&client->dev.kobj, &max6620_attr_grp);
++ return 0;
++}
++
++static int max6620_init_client(struct i2c_client *client) {
++
++ struct max6620_data *data = i2c_get_clientdata(client);
++ int config;
++ int err = -EIO;
++ int i;
++
++ config = i2c_smbus_read_byte_data(client, MAX6620_REG_CONFIG);
++
++ if (config < 0) {
++ dev_err(&client->dev, "Error reading config, aborting.\n");
++ return err;
++ }
++
++
++
++ if (i2c_smbus_write_byte_data(client, MAX6620_REG_CONFIG, config)) {
++ dev_err(&client->dev, "Config write error, aborting.\n");
++ return err;
++ }
++
++ data->config = config;
++ for (i = 0; i < 4; i++) {
++ data->fancfg[i] = i2c_smbus_read_byte_data(client, config_reg[i]);
++ data->fancfg[i] |= 0x80; // enable TACH monitoring
++ i2c_smbus_write_byte_data(client, config_reg[i], data->fancfg[i]);
++ data->fandyn[i] = i2c_smbus_read_byte_data(client, dyn_reg[i]);
++ data-> fandyn[i] |= 0x1C;
++ i2c_smbus_write_byte_data(client, dyn_reg[i], data->fandyn[i]);
++ data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]);
++ data->volt[i] = i2c_smbus_read_byte_data(client, volt_reg[i]);
++ data->target[i] = i2c_smbus_read_byte_data(client, target_reg[i]);
++ data->dac[i] = i2c_smbus_read_byte_data(client, dac_reg[i]);
++
++
++
++ }
++
++
++
++ return 0;
++}
++
++
++
++
++static struct max6620_data *max6620_update_device(struct device *dev)
++{
++ int i;
++ struct i2c_client *client = to_i2c_client(dev);
++ struct max6620_data *data = i2c_get_clientdata(client);
++
++ mutex_lock(&data->update_lock);
++
++ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
++
++ for (i = 0; i < 4; i++) {
++ data->fancfg[i] = i2c_smbus_read_byte_data(client, config_reg[i]);
++ data->fandyn[i] = i2c_smbus_read_byte_data(client, dyn_reg[i]);
++ data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]);
++ data->volt[i] = i2c_smbus_read_byte_data(client, volt_reg[i]);
++ data->target[i] = i2c_smbus_read_byte_data(client, target_reg[i]);
++ data->dac[i] = i2c_smbus_read_byte_data(client, dac_reg[i]);
++ }
++
++
++ /*
++ * Alarms are cleared on read in case the condition that
++ * caused the alarm is removed. Keep the value latched here
++ * for providing the register through different alarm files.
++ */
++ u8 fault_reg;
++ fault_reg = i2c_smbus_read_byte_data(client, MAX6620_REG_FAULT);
++ data->fault |= (fault_reg >> 4) & (fault_reg & 0x0F);
++
++ data->last_updated = jiffies;
++ data->valid = 1;
++ }
++
++ mutex_unlock(&data->update_lock);
++
++ return data;
++}
++
++module_i2c_driver(max6620_driver);
++
++static int __init max6620_init(void)
++{
++ return i2c_add_driver(&max6620_driver);
++}
++module_init(max6620_init);
++
++/**
++ * max6620_exit() - clean up driver
++ *
++ * Called when module is removed.
++ */
++static void __exit max6620_exit(void)
++{
++ i2c_del_driver(&max6620_driver);
++}
++module_exit(max6620_exit);
++
++MODULE_AUTHOR("Lucas Grunenberg");
++MODULE_DESCRIPTION("MAX6620 sensor driver");
++MODULE_LICENSE("GPL");
diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-add-dps460-support.patch b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-add-dps460-support.patch
new file mode 100644
index 00000000..812f619a
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-add-dps460-support.patch
@@ -0,0 +1,78 @@
+enable PMBUS_SKIP_STATUS_CHECK for dps460
+
+From: Vadim Pasternak
+
+Patch for pmbus - includes disabling of the PMBus status check through the platform data structure.
+This is because some PMBus chips don't support the STATUS_CML register, or report communication
+errors for no explicable reason. For such chips, checking the status register must be disabled.
+---
+ drivers/hwmon/pmbus/pmbus.c | 14 ++++++++++++++
+ drivers/hwmon/pmbus/pmbus_core.c | 3 +++
+ 2 files changed, 17 insertions(+)
+
+diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
+index 7e91700..6dd75fb 100644
+--- a/drivers/hwmon/pmbus/pmbus.c
++++ b/drivers/hwmon/pmbus/pmbus.c
+@@ -25,6 +25,7 @@
+ #include
+ #include
+ #include
++#include
+ #include "pmbus.h"
+
+ /*
+@@ -166,14 +167,26 @@ static int pmbus_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+ {
+ struct pmbus_driver_info *info;
++ struct pmbus_platform_data *pdata = NULL;
++ struct device *dev = &client->dev;
+
+ info = devm_kzalloc(&client->dev, sizeof(struct pmbus_driver_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
++ if (!strncmp(id->name, "dps460", sizeof("dps460"))) {
++ pdata = kzalloc(sizeof(struct pmbus_platform_data), GFP_KERNEL);
++ if (!pdata) {
++ kfree(info);
++ return -ENOMEM;
++ }
++ pdata->flags = PMBUS_SKIP_STATUS_CHECK;
++ }
++
+ info->pages = id->driver_data;
+ info->identify = pmbus_identify;
++ dev->platform_data = pdata;
+
+ return pmbus_do_probe(client, id, info);
+ }
+@@ -195,6 +208,7 @@ static const struct i2c_device_id pmbus_id[] = {
+ {"tps40400", 1},
+ {"tps40422", 2},
+ {"udt020", 1},
++ {"dps460", 1},
+ {}
+ };
+
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index 291d11f..09b123f 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -1792,8 +1792,11 @@ EXPORT_SYMBOL_GPL(pmbus_do_probe);
+ int pmbus_do_remove(struct i2c_client *client)
+ {
+ struct pmbus_data *data = i2c_get_clientdata(client);
++ const struct pmbus_platform_data *pdata = dev_get_platdata(&client->dev);
+ hwmon_device_unregister(data->hwmon_dev);
+ kfree(data->group.attrs);
++ if (pdata)
++ kfree(pdata);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(pmbus_do_remove);
+--
+2.1.4
+
diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-dni_dps460-update-pmbus-core.patch b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-dni_dps460-update-pmbus-core.patch
new file mode 100644
index 00000000..38550707
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-dni_dps460-update-pmbus-core.patch
@@ -0,0 +1,96 @@
+Update pmbus_data data structure to meet kernel implementation
+
+From: Shuotian Cheng
+
+The pmbus_data data structure is pasted in the driver.
+Cumulus patch is for kernel 3.2.x.
+Update this data structure to meet current kernel (3.16.x) implementation.
+---
+ drivers/hwmon/pmbus/dni_dps460.c | 42 +++++++++++++++-----------------------
+ 1 file changed, 17 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/hwmon/pmbus/dni_dps460.c b/drivers/hwmon/pmbus/dni_dps460.c
+index c687217..1607b65 100644
+--- a/drivers/hwmon/pmbus/dni_dps460.c
++++ b/drivers/hwmon/pmbus/dni_dps460.c
+@@ -39,41 +39,32 @@ enum chips { dni_dps460 };
+ #define FAN_VALUE_MAX 0x64
+
+ /* Needed to access the mutex. Copied from pmbus_core.c */
+-#define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1)
++#define PB_STATUS_BASE 0
++#define PB_STATUS_VOUT_BASE (PB_STATUS_BASE + PMBUS_PAGES)
++#define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES)
++#define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES)
++#define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES)
++#define PB_STATUS_TEMP_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
++#define PB_STATUS_INPUT_BASE (PB_STATUS_TEMP_BASE + PMBUS_PAGES)
++#define PB_STATUS_VMON_BASE (PB_STATUS_INPUT_BASE + 1)
++#define PB_NUM_STATUS_REG (PB_STATUS_VMON_BASE + 1)
+ struct pmbus_data {
++ struct device *dev;
+ struct device *hwmon_dev;
+
+ u32 flags; /* from platform data */
+
+- int exponent; /* linear mode: exponent for output voltages */
++ int exponent[PMBUS_PAGES];
++ /* linear mode: exponent for output voltages */
+
+ const struct pmbus_driver_info *info;
+
+ int max_attributes;
+ int num_attributes;
+- struct attribute **attributes;
+ struct attribute_group group;
++ const struct attribute_group *groups[2];
+
+- /*
+- * Sensors cover both sensor and limit registers.
+- */
+- int max_sensors;
+- int num_sensors;
+ struct pmbus_sensor *sensors;
+- /*
+- * Booleans are used for alarms.
+- * Values are determined from status registers.
+- */
+- int max_booleans;
+- int num_booleans;
+- struct pmbus_boolean *booleans;
+- /*
+- * Labels are used to map generic names (e.g., "in1")
+- * to PMBus specific names (e.g., "vin" or "vout1").
+- */
+- int max_labels;
+- int num_labels;
+- struct pmbus_label *labels;
+
+ struct mutex update_lock;
+ bool valid;
+@@ -84,6 +75,7 @@ struct pmbus_data {
+ * so we keep them all together.
+ */
+ u8 status[PB_NUM_STATUS_REG];
++ u8 status_register;
+
+ u8 currpage;
+ };
+@@ -123,14 +115,14 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ int err;
+- unsigned int val;
+- unsigned int rpm;
++ unsigned long val;
++ unsigned long rpm;
+
+ err = kstrtol(buf, 10, &rpm);
+ if (err)
+ return err;
+
+- rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
++ rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
+
+ mutex_lock(&data->update_lock);
+
diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-dni_dps460.patch b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-dni_dps460.patch
new file mode 100644
index 00000000..8d93c157
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-dni_dps460.patch
@@ -0,0 +1,304 @@
+Add PMBUS driver for DNI DPS460 Power Supply
+
+From: Cumulus Networks
+
+
+---
+ drivers/hwmon/pmbus/Kconfig | 10 ++
+ drivers/hwmon/pmbus/Makefile | 1
+ drivers/hwmon/pmbus/dni_dps460.c | 253 ++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 264 insertions(+)
+ create mode 100644 drivers/hwmon/pmbus/dni_dps460.c
+
+diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
+index ec48945..7d3b1aa 100644
+--- a/drivers/hwmon/pmbus/Kconfig
++++ b/drivers/hwmon/pmbus/Kconfig
+@@ -77,6 +77,16 @@ config SENSORS_MAX34440
+ This driver can also be built as a module. If so, the module will
+ be called max34440.
+
++config SENSORS_DNI_DPS460
++ tristate "Delta DPS460"
++ default n
++ help
++ If you say yes here you get hardware monitoring support for Delta
++ DPS460.
++
++ This driver can also be built as a module. If so, the module will
++ be called dni_dps460.
++
+ config SENSORS_MAX8688
+ tristate "Maxim MAX8688"
+ default n
+diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
+index 5e6c316..767d086 100644
+--- a/drivers/hwmon/pmbus/Makefile
++++ b/drivers/hwmon/pmbus/Makefile
+@@ -9,6 +9,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
+ obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
+ obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+ obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
++obj-$(CONFIG_SENSORS_DNI_DPS460) += dni_dps460.o
+ obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
+ obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
+ obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
+diff --git a/drivers/hwmon/pmbus/dni_dps460.c b/drivers/hwmon/pmbus/dni_dps460.c
+new file mode 100644
+index 0000000..c687217
+--- /dev/null
++++ b/drivers/hwmon/pmbus/dni_dps460.c
+@@ -0,0 +1,253 @@
++/*
++ * Hardware monitoring driver for Delta DPS460
++ *
++ * Copyright (C) 2014 Cumulus Networks, LLC
++ * Author: Puneet Shenoy
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include "pmbus.h"
++
++enum chips { dni_dps460 };
++
++/* Data provided by DELL Inc */
++#define FAN_RPM_MIN 7200
++#define FAN_RPM_MAX 18000
++#define FAN_VALUE_MIN 0x28
++#define FAN_VALUE_MAX 0x64
++
++/* Needed to access the mutex. Copied from pmbus_core.c */
++#define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1)
++struct pmbus_data {
++ struct device *hwmon_dev;
++
++ u32 flags; /* from platform data */
++
++ int exponent; /* linear mode: exponent for output voltages */
++
++ const struct pmbus_driver_info *info;
++
++ int max_attributes;
++ int num_attributes;
++ struct attribute **attributes;
++ struct attribute_group group;
++
++ /*
++ * Sensors cover both sensor and limit registers.
++ */
++ int max_sensors;
++ int num_sensors;
++ struct pmbus_sensor *sensors;
++ /*
++ * Booleans are used for alarms.
++ * Values are determined from status registers.
++ */
++ int max_booleans;
++ int num_booleans;
++ struct pmbus_boolean *booleans;
++ /*
++ * Labels are used to map generic names (e.g., "in1")
++ * to PMBus specific names (e.g., "vin" or "vout1").
++ */
++ int max_labels;
++ int num_labels;
++ struct pmbus_label *labels;
++
++ struct mutex update_lock;
++ bool valid;
++ unsigned long last_updated; /* in jiffies */
++
++ /*
++ * A single status register covers multiple attributes,
++ * so we keep them all together.
++ */
++ u8 status[PB_NUM_STATUS_REG];
++
++ u8 currpage;
++};
++
++/*
++ * We are only concerned with the first fan. The get_target and set_target are
++ * are written accordingly.
++ */
++static ssize_t get_target(struct device *dev, struct device_attribute *devattr,
++ char *buf) {
++
++ struct i2c_client *client = to_i2c_client(dev);
++ struct pmbus_data *data = i2c_get_clientdata(client);
++ int val;
++ u32 rpm;
++
++ /*
++ * The FAN_COMMAND_n takes a value which is not the RPM.
++ * The value and RPM have a liner relation.
++ * rpm = (FAN_RPM_MIN/FAN_VALUE_MIN) * val
++ * The slope is (FAN_RPM_MIN/FAN_VALUE_MIN) = 180
++ */
++ mutex_lock(&data->update_lock);
++ val = pmbus_read_word_data(client, 0, PMBUS_FAN_COMMAND_1);
++ pmbus_clear_faults(client);
++ mutex_unlock(&data->update_lock);
++ if (val < 0) {
++ return val;
++ }
++ rpm = val * (FAN_RPM_MIN/FAN_VALUE_MIN);
++ return sprintf(buf, "%d\n", rpm);
++}
++
++static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
++ const char *buf, size_t count) {
++
++ struct i2c_client *client = to_i2c_client(dev);
++ struct pmbus_data *data = i2c_get_clientdata(client);
++ int err;
++ unsigned int val;
++ unsigned int rpm;
++
++ err = kstrtol(buf, 10, &rpm);
++ if (err)
++ return err;
++
++ rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
++
++ mutex_lock(&data->update_lock);
++
++ val = FAN_VALUE_MIN * rpm;
++ val /= FAN_RPM_MIN;
++ pmbus_write_word_data(client, 0, PMBUS_FAN_COMMAND_1, (u16)val);
++ pmbus_clear_faults(client);
++
++ mutex_unlock(&data->update_lock);
++
++ return count;
++}
++
++static ssize_t show_pec(struct device *dev, struct device_attribute *dummy,
++ char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ return sprintf(buf, "%d\n", !!(client->flags & I2C_CLIENT_PEC));
++}
++
++static ssize_t set_pec(struct device *dev, struct device_attribute *dummy,
++ const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ long val;
++ int err;
++
++ err = strict_strtol(buf, 10, &val);
++ if (err < 0)
++ return err;
++
++ if (val != 0)
++ client->flags |= I2C_CLIENT_PEC;
++ else
++ client->flags &= ~I2C_CLIENT_PEC;
++
++ return count;
++}
++
++static SENSOR_DEVICE_ATTR(pec, S_IWUSR | S_IRUGO, show_pec, set_pec, 0);
++static SENSOR_DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target,
++ set_target, 0);
++
++static struct attribute *dni_dps460_attrs[] = {
++ &sensor_dev_attr_fan1_target.dev_attr.attr,
++ &sensor_dev_attr_pec.dev_attr.attr,
++ NULL
++};
++static struct attribute_group dni_dps460_attr_grp = {
++ .attrs = dni_dps460_attrs,
++};
++
++static int dni_dps460_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct pmbus_driver_info *info;
++ int ret;
++
++ if (!i2c_check_functionality(client->adapter,
++ I2C_FUNC_SMBUS_BYTE_DATA |
++ I2C_FUNC_SMBUS_WORD_DATA |
++ I2C_FUNC_SMBUS_PEC))
++ return -ENODEV;
++
++ /* Needs PEC(PACKET ERROR CODE). Writes wont work without this. */
++ client->flags = I2C_CLIENT_PEC;
++
++ info = kzalloc(sizeof(struct pmbus_driver_info), GFP_KERNEL);
++ if (!info)
++ return -ENOMEM;
++
++ /* Use only 1 page with 1 Fan, 2 Temps. */
++ info->pages = 1;
++ info->func[0] = PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12 |
++ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
++
++ ret = pmbus_do_probe(client, id, info);
++ if (ret < 0)
++ goto out;
++
++ ret = sysfs_create_group(&client->dev.kobj, &dni_dps460_attr_grp);
++ if (ret)
++ goto out;
++ return 0;
++out:
++ kfree(info);
++ return ret;
++}
++
++static int dni_dps460_remove(struct i2c_client *client)
++{
++ struct pmbus_data *data = i2c_get_clientdata(client);
++
++ sysfs_remove_group(&client->dev.kobj, &dni_dps460_attr_grp);
++ if (data->info)
++ kfree(data->info);
++ pmbus_do_remove(client);
++ return 0;
++}
++
++static const struct i2c_device_id dni_dps460_id[] = {
++ {"dni_dps460", dni_dps460},
++ {}
++};
++MODULE_DEVICE_TABLE(i2c, dni_dps460_id);
++
++static struct i2c_driver dni_dps460_driver = {
++ .driver = {
++ .name = "dni_dps460",
++ },
++ .probe = dni_dps460_probe,
++ .remove = dni_dps460_remove,
++ .id_table = dni_dps460_id,
++};
++
++module_i2c_driver(dni_dps460_driver);
++
++MODULE_AUTHOR("Puneet Shenoy");
++MODULE_DESCRIPTION("PMBus driver for Delta DPS460");
++MODULE_LICENSE("GPL");
diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-ucd9200-mlnx.patch b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-ucd9200-mlnx.patch
new file mode 100644
index 00000000..5d948675
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/patches/driver-hwmon-pmbus-ucd9200-mlnx.patch
@@ -0,0 +1,89 @@
+mlnx patch for UCD9200
+
+From: Vadim Pasternak
+
+Patch replaces, in the device probing routine (ucd9200_probe), the call to
+i2c_smbus_read_block_data with i2c_smbus_read_i2c_block_data.
+
+The first call executes the SMBus "block read" protocol.
+Using this function requires that the client's adapter support
+the I2C_FUNC_SMBUS_READ_BLOCK_DATA functionality. Not all adapter
+drivers support this. In particular Mellanox i2c controller doesn't
+support it. API i2c_smbus_read_i2c_block_data is supposed to be
+more generic and be supported by all i2c client adapters.
+---
+ drivers/hwmon/pmbus/ucd9200.c | 26 +++++++++++++++++++++-----
+ 1 files changed, 21 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
+index 033d6ac..119130c 100644
+--- a/drivers/hwmon/pmbus/ucd9200.c
++++ b/drivers/hwmon/pmbus/ucd9200.c
+@@ -25,6 +25,7 @@
+ #include
+ #include
+ #include
++#include
+ #include "pmbus.h"
+
+ #define UCD9200_PHASE_INFO 0xd2
+@@ -52,14 +53,15 @@ static int ucd9200_probe(struct i2c_client *client,
+ u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
+ struct pmbus_driver_info *info;
+ const struct i2c_device_id *mid;
+- int i, j, ret;
++ int i, j, ret, n, len;
++ u8* buff;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA))
+ return -ENODEV;
+
+- ret = i2c_smbus_read_block_data(client, UCD9200_DEVICE_ID,
++ ret = i2c_smbus_read_i2c_block_data(client, UCD9200_DEVICE_ID, 8,
+ block_buffer);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read device ID\n");
+@@ -68,8 +70,22 @@ static int ucd9200_probe(struct i2c_client *client,
+ block_buffer[ret] = '\0';
+ dev_info(&client->dev, "Device ID %s\n", block_buffer);
+
++ len = strlen(block_buffer);
++ for (n=0; n < len; n++) {
++ if (isalnum(block_buffer[n]))
++ break;
++ }
++ if (n >= len) {
++ dev_err(&client->dev, "Incorrect device name\n");
++ return -ENODEV;
++ }
++ buff = &block_buffer[n];
++ len = strlen(buff);
++
+ for (mid = ucd9200_id; mid->name[0]; mid++) {
+- if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
++ if (len != strlen(mid->name))
++ continue;
++ if (!strncasecmp(mid->name, buff, strlen(mid->name)))
+ break;
+ }
+ if (!mid->name[0]) {
+@@ -86,7 +102,7 @@ static int ucd9200_probe(struct i2c_client *client,
+ if (!info)
+ return -ENOMEM;
+
+- ret = i2c_smbus_read_block_data(client, UCD9200_PHASE_INFO,
++ ret = i2c_smbus_read_i2c_block_data(client, UCD9200_PHASE_INFO, 4,
+ block_buffer);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read phase information\n");
+@@ -100,7 +116,7 @@ static int ucd9200_probe(struct i2c_client *client,
+ * the first unconfigured rail.
+ */
+ info->pages = 0;
+- for (i = 0; i < ret; i++) {
++ for (i = 1; i < ret; i++) {
+ if (!block_buffer[i])
+ break;
+ info->pages++;
diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch b/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch
new file mode 100644
index 00000000..bf6c4fc7
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-add-delay-param.patch
@@ -0,0 +1,57 @@
+Add 'delay' module param to the driver.
+
+From: Cumulus Networks
+
+This is needed on S6000 for safe PMBUS access.
+Without setting the 'delay', the ismt driver throws a 'completion wait
+timed out' error message.
+---
+ drivers/i2c/busses/i2c-ismt.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
+index d9ee43c..b2b3856 100644
+--- a/drivers/i2c/busses/i2c-ismt.c
++++ b/drivers/i2c/busses/i2c-ismt.c
+@@ -70,6 +70,7 @@
+ #include
+ #include
+ #include
++#include
+
+ #include
+
+@@ -192,9 +193,12 @@ static const struct pci_device_id ismt_ids[] = {
+ MODULE_DEVICE_TABLE(pci, ismt_ids);
+
+ /* Bus speed control bits for slow debuggers - refer to the docs for usage */
+-static unsigned int bus_speed;
++static unsigned int bus_speed = 100;
++static unsigned int delay = 1000;
+ module_param(bus_speed, uint, S_IRUGO);
+-MODULE_PARM_DESC(bus_speed, "Bus Speed in kHz (0 = BIOS default)");
++MODULE_PARM_DESC(bus_speed, "Bus Speed in kHz (1000 by default)");
++module_param(delay, uint, S_IRUGO);
++MODULE_PARM_DESC(delay, "Delay in microsecs before access (1000 by default)");
+
+ /**
+ * __ismt_desc_dump() - dump the contents of a specific descriptor
+@@ -391,6 +395,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
+ struct ismt_priv *priv = i2c_get_adapdata(adap);
+ struct device *dev = &priv->pci_dev->dev;
+
++ if (delay > 0)
++ udelay(delay);
++
+ desc = &priv->hw[priv->head];
+
+ /* Initialize the DMA buffer */
+@@ -756,7 +763,7 @@ static void ismt_hw_init(struct ismt_priv *priv)
+ bus_speed = 1000;
+ break;
+ }
+- dev_dbg(dev, "SMBus clock is running at %d kHz\n", bus_speed);
++ dev_info(dev, "SMBus clock is running at %d kHz with delay %d us\n", bus_speed, delay);
+ }
+
+ /**
diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-enable-param.patch b/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-enable-param.patch
new file mode 100644
index 00000000..612b02db
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/patches/driver-i2c-bus-intel-ismt-enable-param.patch
@@ -0,0 +1,27 @@
+diff -urpN a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
+--- a/drivers/i2c/busses/i2c-ismt.c 2016-12-21 02:12:49.589201206 +0000
++++ b/drivers/i2c/busses/i2c-ismt.c 2016-12-21 02:15:03.973204122 +0000
+@@ -200,6 +200,11 @@ MODULE_PARM_DESC(bus_speed, "Bus Speed i
+ module_param(delay, uint, S_IRUGO);
+ MODULE_PARM_DESC(delay, "Delay in microsecs before access (1000 by default)");
+
++/* Enable/Disable driver */
++static unsigned int enable = 1;
++module_param(enable, uint, S_IRUGO);
++MODULE_PARM_DESC(enable, "Enable or disable the ISMT driver (enabled by default)");
++
+ /**
+ * __ismt_desc_dump() - dump the contents of a specific descriptor
+ */
+@@ -852,6 +857,11 @@ ismt_probe(struct pci_dev *pdev, const s
+ struct ismt_priv *priv;
+ unsigned long start, len;
+
++ if(!enable) {
++ dev_warn(&pdev->dev, "module is disabled.\n");
++ return -ENODEV;
++ }
++
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-igb-version-5.3.54.patch b/packages/base/any/kernels/3.16-lts/patches/driver-igb-version-5.3.54.patch
new file mode 100644
index 00000000..a3134c43
--- /dev/null
+++ b/packages/base/any/kernels/3.16-lts/patches/driver-igb-version-5.3.54.patch
@@ -0,0 +1,48795 @@
+diff -Nu a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
+--- a/drivers/net/ethernet/intel/igb/Makefile 2016-11-13 09:20:24.786171605 +0000
++++ b/drivers/net/ethernet/intel/igb/Makefile 2016-11-13 10:43:55.318238134 +0000
+@@ -32,5 +32,7 @@
+ obj-$(CONFIG_IGB) += igb.o
+
+ igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
+- e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
+- e1000_i210.o igb_ptp.o igb_hwmon.o
++ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
++ e1000_i210.o igb_ptp.o igb_hwmon.o \
++ e1000_manage.o igb_param.o kcompat.o e1000_api.o \
++ igb_vmdq.o igb_procfs.o igb_debugfs.o
+diff -Nu a/drivers/net/ethernet/intel/igb/Module.supported b/drivers/net/ethernet/intel/igb/Module.supported
+--- a/drivers/net/ethernet/intel/igb/Module.supported 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/Module.supported 2016-11-13 10:27:24.246224975 +0000
+@@ -0,0 +1 @@
++igb.ko external
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
+--- a/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_82575.c 2016-11-14 14:32:08.575567168 +0000
+@@ -1,94 +1,134 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
+-
+-/* e1000_82575
+- * e1000_82576
+- */
++/*******************************************************************************
+
+-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
+
+-#include
+-#include
+-#include
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++/*
++ * 82575EB Gigabit Network Connection
++ * 82575EB Gigabit Backplane Connection
++ * 82575GB Gigabit Network Connection
++ * 82576 Gigabit Network Connection
++ * 82576 Quad Port Gigabit Mezzanine Adapter
++ * 82580 Gigabit Network Connection
++ * I350 Gigabit Network Connection
++ */
+
+-#include "e1000_mac.h"
+-#include "e1000_82575.h"
++#include "e1000_api.h"
+ #include "e1000_i210.h"
+
+-static s32 igb_get_invariants_82575(struct e1000_hw *);
+-static s32 igb_acquire_phy_82575(struct e1000_hw *);
+-static void igb_release_phy_82575(struct e1000_hw *);
+-static s32 igb_acquire_nvm_82575(struct e1000_hw *);
+-static void igb_release_nvm_82575(struct e1000_hw *);
+-static s32 igb_check_for_link_82575(struct e1000_hw *);
+-static s32 igb_get_cfg_done_82575(struct e1000_hw *);
+-static s32 igb_init_hw_82575(struct e1000_hw *);
+-static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
+-static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
+-static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
+-static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
+-static s32 igb_reset_hw_82575(struct e1000_hw *);
+-static s32 igb_reset_hw_82580(struct e1000_hw *);
+-static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
+-static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
+-static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
+-static s32 igb_setup_copper_link_82575(struct e1000_hw *);
+-static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
+-static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
+-static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
+-static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
+-static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
+- u16 *);
+-static s32 igb_get_phy_id_82575(struct e1000_hw *);
+-static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
+-static bool igb_sgmii_active_82575(struct e1000_hw *);
+-static s32 igb_reset_init_script_82575(struct e1000_hw *);
+-static s32 igb_read_mac_addr_82575(struct e1000_hw *);
+-static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
+-static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
+-static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
+-static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
+-static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
+-static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
++static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
++static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
++static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
++static void e1000_release_phy_82575(struct e1000_hw *hw);
++static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
++static void e1000_release_nvm_82575(struct e1000_hw *hw);
++static s32 e1000_check_for_link_82575(struct e1000_hw *hw);
++static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw);
++static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
++static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
++ u16 *duplex);
++static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
++static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
++ u16 *data);
++static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
++static s32 e1000_reset_hw_82580(struct e1000_hw *hw);
++static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw,
++ u32 offset, u16 *data);
++static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw,
++ u32 offset, u16 data);
++static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw,
++ bool active);
++static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw,
++ bool active);
++static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
++ bool active);
++static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
++static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
++static s32 e1000_get_media_type_82575(struct e1000_hw *hw);
++static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw);
++static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
++static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
++ u32 offset, u16 data);
++static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
++static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
++static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
++ u16 *speed, u16 *duplex);
++static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
++static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
++static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
++static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
++static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
++static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
++static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
++static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
++static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
++static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
++static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
++static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
++static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
++static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
++ u16 offset);
++static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
++ u16 offset);
++static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
++static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
++static void e1000_clear_vfta_i350(struct e1000_hw *hw);
++
++static void e1000_i2c_start(struct e1000_hw *hw);
++static void e1000_i2c_stop(struct e1000_hw *hw);
++static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data);
++static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data);
++static s32 e1000_get_i2c_ack(struct e1000_hw *hw);
++static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data);
++static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data);
++static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
++static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
++static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data);
++static bool e1000_get_i2c_data(u32 *i2cctl);
++
+ static const u16 e1000_82580_rxpbs_table[] = {
+ 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
++#define E1000_82580_RXPBS_TABLE_SIZE \
++ (sizeof(e1000_82580_rxpbs_table) / \
++ sizeof(e1000_82580_rxpbs_table[0]))
+
+ /**
+- * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
++ * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
+ * @hw: pointer to the HW structure
+ *
+ * Called to determine if the I2C pins are being used for I2C or as an
+ * external MDIO interface since the two options are mutually exclusive.
+ **/
+-static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
++static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
+ {
+ u32 reg = 0;
+ bool ext_mdio = false;
+
++ DEBUGFUNC("e1000_sgmii_uses_mdio_82575");
++
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+- reg = rd32(E1000_MDIC);
++ reg = E1000_READ_REG(hw, E1000_MDIC);
+ ext_mdio = !!(reg & E1000_MDIC_DEST);
+ break;
+ case e1000_82580:
+@@ -96,7 +136,7 @@
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+- reg = rd32(E1000_MDICNFG);
++ reg = E1000_READ_REG(hw, E1000_MDICNFG);
+ ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
+ break;
+ default:
+@@ -106,135 +146,98 @@
+ }
+
+ /**
+- * igb_check_for_link_media_swap - Check which M88E1112 interface linked
+- * @hw: pointer to the HW structure
+- *
+- * Poll the M88E1112 interfaces to see which interface achieved link.
+- */
+-static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
+-{
+- struct e1000_phy_info *phy = &hw->phy;
+- s32 ret_val;
+- u16 data;
+- u8 port = 0;
+-
+- /* Check the copper medium. */
+- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+- if (ret_val)
+- return ret_val;
+-
+- ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+- if (ret_val)
+- return ret_val;
+-
+- if (data & E1000_M88E1112_STATUS_LINK)
+- port = E1000_MEDIA_PORT_COPPER;
+-
+- /* Check the other medium. */
+- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
+- if (ret_val)
+- return ret_val;
+-
+- ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+- if (ret_val)
+- return ret_val;
+-
+- /* reset page to 0 */
+- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+- if (ret_val)
+- return ret_val;
+-
+- if (data & E1000_M88E1112_STATUS_LINK)
+- port = E1000_MEDIA_PORT_OTHER;
+-
+- /* Determine if a swap needs to happen. */
+- if (port && (hw->dev_spec._82575.media_port != port)) {
+- hw->dev_spec._82575.media_port = port;
+- hw->dev_spec._82575.media_changed = true;
+- } else {
+- ret_val = igb_check_for_link_82575(hw);
+- }
+-
+- return 0;
+-}
+-
+-/**
+- * igb_init_phy_params_82575 - Init PHY func ptrs.
++ * e1000_init_phy_params_82575 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+-static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
++static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+- s32 ret_val = 0;
++ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext;
+
++ DEBUGFUNC("e1000_init_phy_params_82575");
++
++ phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic;
++ phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic;
++
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ goto out;
+ }
+
++ phy->ops.power_up = igb_e1000_power_up_phy_copper;
++ phy->ops.power_down = e1000_power_down_phy_copper_82575;
++
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+
+- ctrl_ext = rd32(E1000_CTRL_EXT);
++ phy->ops.acquire = e1000_acquire_phy_82575;
++ phy->ops.check_reset_block = e1000_check_reset_block_generic;
++ phy->ops.commit = e1000_phy_sw_reset_generic;
++ phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
++ phy->ops.release = e1000_release_phy_82575;
+
+- if (igb_sgmii_active_82575(hw)) {
+- phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
++
++ if (e1000_sgmii_active_82575(hw)) {
++ phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ } else {
+- phy->ops.reset = igb_phy_hw_reset;
++ phy->ops.reset = e1000_phy_hw_reset_generic;
+ ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+ }
+
+- wr32(E1000_CTRL_EXT, ctrl_ext);
+- igb_reset_mdicnfg_82580(hw);
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
++ e1000_reset_mdicnfg_82580(hw);
+
+- if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
+- phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
+- phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
++ if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
++ phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
++ phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
+ } else {
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+- phy->ops.read_reg = igb_read_phy_reg_82580;
+- phy->ops.write_reg = igb_write_phy_reg_82580;
++ phy->ops.read_reg = e1000_read_phy_reg_82580;
++ phy->ops.write_reg = e1000_write_phy_reg_82580;
+ break;
+ case e1000_i210:
+ case e1000_i211:
+- phy->ops.read_reg = igb_read_phy_reg_gs40g;
+- phy->ops.write_reg = igb_write_phy_reg_gs40g;
++ phy->ops.read_reg = e1000_read_phy_reg_gs40g;
++ phy->ops.write_reg = e1000_write_phy_reg_gs40g;
+ break;
+ default:
+- phy->ops.read_reg = igb_read_phy_reg_igp;
+- phy->ops.write_reg = igb_write_phy_reg_igp;
++ phy->ops.read_reg = e1000_read_phy_reg_igp;
++ phy->ops.write_reg = e1000_write_phy_reg_igp;
+ }
+ }
+
+- /* set lan id */
+- hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
+- E1000_STATUS_FUNC_SHIFT;
+-
+ /* Set phy->phy_addr and phy->id. */
+- ret_val = igb_get_phy_id_82575(hw);
+- if (ret_val)
+- return ret_val;
++ ret_val = e1000_get_phy_id_82575(hw);
+
+ /* Verify phy id and set remaining function pointers */
+ switch (phy->id) {
+ case M88E1543_E_PHY_ID:
++ case M88E1512_E_PHY_ID:
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
++ case M88E1340M_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ phy->type = e1000_phy_m88;
+- phy->ops.check_polarity = igb_check_polarity_m88;
+- phy->ops.get_phy_info = igb_get_phy_info_m88;
+- if (phy->id != M88E1111_I_PHY_ID)
++ phy->ops.check_polarity = igb_e1000_check_polarity_m88;
++ phy->ops.get_info = e1000_get_phy_info_m88;
++ if (phy->id == I347AT4_E_PHY_ID ||
++ phy->id == M88E1112_E_PHY_ID ||
++ phy->id == M88E1340M_E_PHY_ID)
+ phy->ops.get_cable_length =
+- igb_get_cable_length_m88_gen2;
++ e1000_get_cable_length_m88_gen2;
++ else if (phy->id == M88E1543_E_PHY_ID ||
++ phy->id == M88E1512_E_PHY_ID)
++ phy->ops.get_cable_length =
++ e1000_get_cable_length_m88_gen2;
+ else
+- phy->ops.get_cable_length = igb_get_cable_length_m88;
+- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
++ phy->ops.get_cable_length = e1000_get_cable_length_m88;
++ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ /* Check if this PHY is confgured for media swap. */
+ if (phy->id == M88E1112_E_PHY_ID) {
+ u16 data;
+@@ -256,35 +259,48 @@
+ if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+ data == E1000_M88E1112_AUTO_COPPER_BASEX)
+ hw->mac.ops.check_for_link =
+- igb_check_for_link_media_swap;
++ e1000_check_for_link_media_swap;
++ }
++ if (phy->id == M88E1512_E_PHY_ID) {
++ ret_val = e1000_initialize_M88E1512_phy(hw);
++ if (ret_val)
++ goto out;
++ }
++ if (phy->id == M88E1543_E_PHY_ID) {
++ ret_val = e1000_initialize_M88E1543_phy(hw);
++ if (ret_val)
++ goto out;
+ }
+ break;
+ case IGP03E1000_E_PHY_ID:
++ case IGP04E1000_E_PHY_ID:
+ phy->type = e1000_phy_igp_3;
+- phy->ops.get_phy_info = igb_get_phy_info_igp;
+- phy->ops.get_cable_length = igb_get_cable_length_igp_2;
+- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
+- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
+- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
++ phy->ops.check_polarity = igb_e1000_check_polarity_igp;
++ phy->ops.get_info = e1000_get_phy_info_igp;
++ phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
++ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
++ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
++ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
+ break;
+ case I82580_I_PHY_ID:
+ case I350_I_PHY_ID:
+ phy->type = e1000_phy_82580;
++ phy->ops.check_polarity = igb_e1000_check_polarity_82577;
+ phy->ops.force_speed_duplex =
+- igb_phy_force_speed_duplex_82580;
+- phy->ops.get_cable_length = igb_get_cable_length_82580;
+- phy->ops.get_phy_info = igb_get_phy_info_82580;
+- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
++ igb_e1000_phy_force_speed_duplex_82577;
++ phy->ops.get_cable_length = igb_e1000_get_cable_length_82577;
++ phy->ops.get_info = igb_e1000_get_phy_info_82577;
++ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
++ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
+ break;
+ case I210_I_PHY_ID:
+ phy->type = e1000_phy_i210;
+- phy->ops.check_polarity = igb_check_polarity_m88;
+- phy->ops.get_phy_info = igb_get_phy_info_m88;
+- phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
+- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
++ phy->ops.check_polarity = igb_e1000_check_polarity_m88;
++ phy->ops.get_info = e1000_get_phy_info_m88;
++ phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2;
++ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
++ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
++ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+@@ -296,19 +312,21 @@
+ }
+
+ /**
+- * igb_init_nvm_params_82575 - Init NVM func ptrs.
++ * e1000_init_nvm_params_82575 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+-static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
++s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
+ {
+ struct e1000_nvm_info *nvm = &hw->nvm;
+- u32 eecd = rd32(E1000_EECD);
++ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u16 size;
+
++ DEBUGFUNC("e1000_init_nvm_params_82575");
++
+ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+-
+- /* Added to a constant, "size" becomes the left-shift value
++ /*
++ * Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+@@ -320,433 +338,272 @@
+ size = 15;
+
+ nvm->word_size = 1 << size;
+- nvm->opcode_bits = 8;
+- nvm->delay_usec = 1;
++ if (hw->mac.type < e1000_i210) {
++ nvm->opcode_bits = 8;
++ nvm->delay_usec = 1;
++
++ switch (nvm->override) {
++ case e1000_nvm_override_spi_large:
++ nvm->page_size = 32;
++ nvm->address_bits = 16;
++ break;
++ case e1000_nvm_override_spi_small:
++ nvm->page_size = 8;
++ nvm->address_bits = 8;
++ break;
++ default:
++ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
++ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
++ 16 : 8;
++ break;
++ }
++ if (nvm->word_size == (1 << 15))
++ nvm->page_size = 128;
+
+- switch (nvm->override) {
+- case e1000_nvm_override_spi_large:
+- nvm->page_size = 32;
+- nvm->address_bits = 16;
+- break;
+- case e1000_nvm_override_spi_small:
+- nvm->page_size = 8;
+- nvm->address_bits = 8;
+- break;
+- default:
+- nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+- nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+- 16 : 8;
+- break;
+- }
+- if (nvm->word_size == (1 << 15))
+- nvm->page_size = 128;
+-
+- nvm->type = e1000_nvm_eeprom_spi;
+-
+- /* NVM Function Pointers */
+- nvm->ops.acquire = igb_acquire_nvm_82575;
+- nvm->ops.release = igb_release_nvm_82575;
+- nvm->ops.write = igb_write_nvm_spi;
+- nvm->ops.validate = igb_validate_nvm_checksum;
+- nvm->ops.update = igb_update_nvm_checksum;
++ nvm->type = e1000_nvm_eeprom_spi;
++ } else {
++ nvm->type = e1000_nvm_flash_hw;
++ }
++
++ /* Function Pointers */
++ nvm->ops.acquire = e1000_acquire_nvm_82575;
++ nvm->ops.release = e1000_release_nvm_82575;
+ if (nvm->word_size < (1 << 15))
+- nvm->ops.read = igb_read_nvm_eerd;
++ nvm->ops.read = e1000_read_nvm_eerd;
+ else
+- nvm->ops.read = igb_read_nvm_spi;
++ nvm->ops.read = e1000_read_nvm_spi;
++
++ nvm->ops.write = e1000_write_nvm_spi;
++ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
++ nvm->ops.update = e1000_update_nvm_checksum_generic;
++ nvm->ops.valid_led_default = e1000_valid_led_default_82575;
+
+ /* override generic family function pointers for specific descendants */
+ switch (hw->mac.type) {
+ case e1000_82580:
+- nvm->ops.validate = igb_validate_nvm_checksum_82580;
+- nvm->ops.update = igb_update_nvm_checksum_82580;
++ nvm->ops.validate = e1000_validate_nvm_checksum_82580;
++ nvm->ops.update = e1000_update_nvm_checksum_82580;
+ break;
+- case e1000_i354:
+ case e1000_i350:
+- nvm->ops.validate = igb_validate_nvm_checksum_i350;
+- nvm->ops.update = igb_update_nvm_checksum_i350;
++ case e1000_i354:
++ nvm->ops.validate = e1000_validate_nvm_checksum_i350;
++ nvm->ops.update = e1000_update_nvm_checksum_i350;
+ break;
+ default:
+ break;
+ }
+
+- return 0;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_init_mac_params_82575 - Init MAC func ptrs.
++ * e1000_init_mac_params_82575 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+-static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
++static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
+ {
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+
++ DEBUGFUNC("e1000_init_mac_params_82575");
++
++ /* Derives media type */
++ e1000_get_media_type_82575(hw);
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
++ /* Set uta register count */
++ mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
+ /* Set rar entry count */
+- switch (mac->type) {
+- case e1000_82576:
++ mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
++ if (mac->type == e1000_82576)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+- break;
+- case e1000_82580:
++ if (mac->type == e1000_82580)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+- break;
+- case e1000_i350:
+- case e1000_i354:
++ if (mac->type == e1000_i350 || mac->type == e1000_i354)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+- break;
+- default:
+- mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+- break;
+- }
+- /* reset */
+- if (mac->type >= e1000_82580)
+- mac->ops.reset_hw = igb_reset_hw_82580;
+- else
+- mac->ops.reset_hw = igb_reset_hw_82575;
+
+- if (mac->type >= e1000_i210) {
+- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
+- mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
++ /* Enable EEE default settings for EEE supported devices */
++ if (mac->type >= e1000_i350)
++ dev_spec->eee_disable = false;
+
+- } else {
+- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
+- mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
+- }
++ /* Allow a single clear of the SW semaphore on I210 and newer */
++ if (mac->type >= e1000_i210)
++ dev_spec->clear_semaphore_once = true;
+
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+- /* Set if manageability features are enabled. */
++ /* FWSM register */
++ mac->has_fwsm = true;
++ /* ARC supported; valid only if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+- (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
+- ? true : false;
+- /* enable EEE on i350 parts and later parts */
+- if (mac->type >= e1000_i350)
+- dev_spec->eee_disable = false;
++ !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK);
++
++ /* Function pointers */
++
++ /* bus type/speed/width */
++ mac->ops.get_bus_info = igb_e1000_get_bus_info_pcie_generic;
++ /* reset */
++ if (mac->type >= e1000_82580)
++ mac->ops.reset_hw = e1000_reset_hw_82580;
+ else
+- dev_spec->eee_disable = true;
+- /* Allow a single clear of the SW semaphore on I210 and newer */
+- if (mac->type >= e1000_i210)
+- dev_spec->clear_semaphore_once = true;
++ mac->ops.reset_hw = e1000_reset_hw_82575;
++ /* hw initialization */
++ if ((mac->type == e1000_i210) || (mac->type == e1000_i211))
++ mac->ops.init_hw = e1000_init_hw_i210;
++ else
++ mac->ops.init_hw = e1000_init_hw_82575;
++ /* link setup */
++ mac->ops.setup_link = e1000_setup_link_generic;
+ /* physical interface link setup */
+ mac->ops.setup_physical_interface =
+ (hw->phy.media_type == e1000_media_type_copper)
+- ? igb_setup_copper_link_82575
+- : igb_setup_serdes_link_82575;
+-
+- if (mac->type == e1000_82580) {
+- switch (hw->device_id) {
+- /* feature not supported on these id's */
+- case E1000_DEV_ID_DH89XXCC_SGMII:
+- case E1000_DEV_ID_DH89XXCC_SERDES:
+- case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+- case E1000_DEV_ID_DH89XXCC_SFP:
+- break;
+- default:
+- hw->dev_spec._82575.mas_capable = true;
+- break;
+- }
++ ? e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575;
++ /* physical interface shutdown */
++ mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
++ /* physical interface power up */
++ mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
++ /* check for link */
++ mac->ops.check_for_link = e1000_check_for_link_82575;
++ /* read mac address */
++ mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
++ /* configure collision distance */
++ mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
++ /* multicast address update */
++ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
++ if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) {
++ /* writing VFTA */
++ mac->ops.write_vfta = e1000_write_vfta_i350;
++ /* clearing VFTA */
++ mac->ops.clear_vfta = e1000_clear_vfta_i350;
++ } else {
++ /* writing VFTA */
++ mac->ops.write_vfta = igb_e1000_write_vfta_generic;
++ /* clearing VFTA */
++ mac->ops.clear_vfta = igb_e1000_clear_vfta_generic;
++ }
++ if (hw->mac.type >= e1000_82580)
++ mac->ops.validate_mdi_setting =
++ e1000_validate_mdi_setting_crossover_generic;
++ /* ID LED init */
++ mac->ops.id_led_init = e1000_id_led_init_generic;
++ /* blink LED */
++ mac->ops.blink_led = e1000_blink_led_generic;
++ /* setup LED */
++ mac->ops.setup_led = e1000_setup_led_generic;
++ /* cleanup LED */
++ mac->ops.cleanup_led = e1000_cleanup_led_generic;
++ /* turn on/off LED */
++ mac->ops.led_on = e1000_led_on_generic;
++ mac->ops.led_off = e1000_led_off_generic;
++ /* clear hardware counters */
++ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
++ /* link info */
++ mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
++ /* get thermal sensor data */
++ mac->ops.get_thermal_sensor_data =
++ e1000_get_thermal_sensor_data_generic;
++ mac->ops.init_thermal_sensor_thresh =
++ e1000_init_thermal_sensor_thresh_generic;
++ /* acquire SW_FW sync */
++ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575;
++ mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575;
++ if (mac->type >= e1000_i210) {
++ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210;
++ mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210;
+ }
+- return 0;
++
++ /* set lan id for port to determine which phy lock to use */
++ hw->mac.ops.set_lan_id(hw);
++
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_set_sfp_media_type_82575 - derives SFP module media type.
++ * e1000_init_function_pointers_82575 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+- * The media type is chosen based on SFP module.
+- * compatibility flags retrieved from SFP ID EEPROM.
++ * Called to initialize all function pointers and parameters.
+ **/
+-static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
++void e1000_init_function_pointers_82575(struct e1000_hw *hw)
+ {
+- s32 ret_val = E1000_ERR_CONFIG;
+- u32 ctrl_ext = 0;
+- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+- struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
+- u8 tranceiver_type = 0;
+- s32 timeout = 3;
++ DEBUGFUNC("e1000_init_function_pointers_82575");
+
+- /* Turn I2C interface ON and power on sfp cage */
+- ctrl_ext = rd32(E1000_CTRL_EXT);
+- ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+- wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
++ hw->mac.ops.init_params = e1000_init_mac_params_82575;
++ hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
++ hw->phy.ops.init_params = e1000_init_phy_params_82575;
++ hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
++}
+
+- wrfl();
++/**
++ * e1000_acquire_phy_82575 - Acquire rights to access PHY
++ * @hw: pointer to the HW structure
++ *
++ * Acquire access rights to the correct PHY.
++ **/
++static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
++{
++ u16 mask = E1000_SWFW_PHY0_SM;
+
+- /* Read SFP module data */
+- while (timeout) {
+- ret_val = igb_read_sfp_data_byte(hw,
+- E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
+- &tranceiver_type);
+- if (ret_val == 0)
+- break;
+- msleep(100);
+- timeout--;
+- }
+- if (ret_val != 0)
+- goto out;
++ DEBUGFUNC("e1000_acquire_phy_82575");
+
+- ret_val = igb_read_sfp_data_byte(hw,
+- E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
+- (u8 *)eth_flags);
+- if (ret_val != 0)
+- goto out;
++ if (hw->bus.func == E1000_FUNC_1)
++ mask = E1000_SWFW_PHY1_SM;
++ else if (hw->bus.func == E1000_FUNC_2)
++ mask = E1000_SWFW_PHY2_SM;
++ else if (hw->bus.func == E1000_FUNC_3)
++ mask = E1000_SWFW_PHY3_SM;
+
+- /* Check if there is some SFP module plugged and powered */
+- if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
+- (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
+- dev_spec->module_plugged = true;
+- if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
+- hw->phy.media_type = e1000_media_type_internal_serdes;
+- } else if (eth_flags->e100_base_fx) {
+- dev_spec->sgmii_active = true;
+- hw->phy.media_type = e1000_media_type_internal_serdes;
+- } else if (eth_flags->e1000_base_t) {
+- dev_spec->sgmii_active = true;
+- hw->phy.media_type = e1000_media_type_copper;
+- } else {
+- hw->phy.media_type = e1000_media_type_unknown;
+- hw_dbg("PHY module has not been recognized\n");
+- goto out;
+- }
+- } else {
+- hw->phy.media_type = e1000_media_type_unknown;
+- }
+- ret_val = 0;
+-out:
+- /* Restore I2C interface setting */
+- wr32(E1000_CTRL_EXT, ctrl_ext);
+- return ret_val;
++ return hw->mac.ops.acquire_swfw_sync(hw, mask);
+ }
+
+-static s32 igb_get_invariants_82575(struct e1000_hw *hw)
++/**
++ * e1000_release_phy_82575 - Release rights to access PHY
++ * @hw: pointer to the HW structure
++ *
++ * A wrapper to release access rights to the correct PHY.
++ **/
++static void e1000_release_phy_82575(struct e1000_hw *hw)
+ {
+- struct e1000_mac_info *mac = &hw->mac;
+- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+- s32 ret_val;
+- u32 ctrl_ext = 0;
+- u32 link_mode = 0;
++ u16 mask = E1000_SWFW_PHY0_SM;
+
+- switch (hw->device_id) {
+- case E1000_DEV_ID_82575EB_COPPER:
+- case E1000_DEV_ID_82575EB_FIBER_SERDES:
+- case E1000_DEV_ID_82575GB_QUAD_COPPER:
+- mac->type = e1000_82575;
+- break;
+- case E1000_DEV_ID_82576:
+- case E1000_DEV_ID_82576_NS:
+- case E1000_DEV_ID_82576_NS_SERDES:
+- case E1000_DEV_ID_82576_FIBER:
+- case E1000_DEV_ID_82576_SERDES:
+- case E1000_DEV_ID_82576_QUAD_COPPER:
+- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+- case E1000_DEV_ID_82576_SERDES_QUAD:
+- mac->type = e1000_82576;
+- break;
+- case E1000_DEV_ID_82580_COPPER:
+- case E1000_DEV_ID_82580_FIBER:
+- case E1000_DEV_ID_82580_QUAD_FIBER:
+- case E1000_DEV_ID_82580_SERDES:
+- case E1000_DEV_ID_82580_SGMII:
+- case E1000_DEV_ID_82580_COPPER_DUAL:
+- case E1000_DEV_ID_DH89XXCC_SGMII:
+- case E1000_DEV_ID_DH89XXCC_SERDES:
+- case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+- case E1000_DEV_ID_DH89XXCC_SFP:
+- mac->type = e1000_82580;
+- break;
+- case E1000_DEV_ID_I350_COPPER:
+- case E1000_DEV_ID_I350_FIBER:
+- case E1000_DEV_ID_I350_SERDES:
+- case E1000_DEV_ID_I350_SGMII:
+- mac->type = e1000_i350;
+- break;
+- case E1000_DEV_ID_I210_COPPER:
+- case E1000_DEV_ID_I210_FIBER:
+- case E1000_DEV_ID_I210_SERDES:
+- case E1000_DEV_ID_I210_SGMII:
+- case E1000_DEV_ID_I210_COPPER_FLASHLESS:
+- case E1000_DEV_ID_I210_SERDES_FLASHLESS:
+- mac->type = e1000_i210;
+- break;
+- case E1000_DEV_ID_I211_COPPER:
+- mac->type = e1000_i211;
+- break;
+- case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
+- case E1000_DEV_ID_I354_SGMII:
+- case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
+- mac->type = e1000_i354;
+- break;
+- default:
+- return -E1000_ERR_MAC_INIT;
+- break;
+- }
++ DEBUGFUNC("e1000_release_phy_82575");
+
+- /* Set media type */
+- /* The 82575 uses bits 22:23 for link mode. The mode can be changed
+- * based on the EEPROM. We cannot rely upon device ID. There
+- * is no distinguishable difference between fiber and internal
+- * SerDes mode on the 82575. There can be an external PHY attached
+- * on the SGMII interface. For this, we'll set sgmii_active to true.
+- */
+- hw->phy.media_type = e1000_media_type_copper;
+- dev_spec->sgmii_active = false;
+- dev_spec->module_plugged = false;
++ if (hw->bus.func == E1000_FUNC_1)
++ mask = E1000_SWFW_PHY1_SM;
++ else if (hw->bus.func == E1000_FUNC_2)
++ mask = E1000_SWFW_PHY2_SM;
++ else if (hw->bus.func == E1000_FUNC_3)
++ mask = E1000_SWFW_PHY3_SM;
+
+- ctrl_ext = rd32(E1000_CTRL_EXT);
++ hw->mac.ops.release_swfw_sync(hw, mask);
++}
+
+- link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
+- switch (link_mode) {
+- case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+- hw->phy.media_type = e1000_media_type_internal_serdes;
+- break;
+- case E1000_CTRL_EXT_LINK_MODE_SGMII:
+- /* Get phy control interface type set (MDIO vs. I2C)*/
+- if (igb_sgmii_uses_mdio_82575(hw)) {
+- hw->phy.media_type = e1000_media_type_copper;
+- dev_spec->sgmii_active = true;
+- break;
+- }
+- /* fall through for I2C based SGMII */
+- case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+- /* read media type from SFP EEPROM */
+- ret_val = igb_set_sfp_media_type_82575(hw);
+- if ((ret_val != 0) ||
+- (hw->phy.media_type == e1000_media_type_unknown)) {
+- /* If media type was not identified then return media
+- * type defined by the CTRL_EXT settings.
+- */
+- hw->phy.media_type = e1000_media_type_internal_serdes;
++/**
++ * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ *
++ * Reads the PHY register at offset using the serial gigabit media independent
++ * interface and stores the retrieved information in data.
++ **/
++static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
++ u16 *data)
++{
++ s32 ret_val = -E1000_ERR_PARAM;
+
+- if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
+- hw->phy.media_type = e1000_media_type_copper;
+- dev_spec->sgmii_active = true;
+- }
++ DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
+
+- break;
+- }
++ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
++ DEBUGOUT1("PHY Address %u is out of range\n", offset);
++ goto out;
++ }
+
+- /* do not change link mode for 100BaseFX */
+- if (dev_spec->eth_flags.e100_base_fx)
+- break;
++ ret_val = hw->phy.ops.acquire(hw);
++ if (ret_val)
++ goto out;
+
+- /* change current link mode setting */
+- ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+-
+- if (hw->phy.media_type == e1000_media_type_copper)
+- ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
+- else
+- ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+-
+- wr32(E1000_CTRL_EXT, ctrl_ext);
+-
+- break;
+- default:
+- break;
+- }
+-
+- /* mac initialization and operations */
+- ret_val = igb_init_mac_params_82575(hw);
+- if (ret_val)
+- goto out;
+-
+- /* NVM initialization */
+- ret_val = igb_init_nvm_params_82575(hw);
+- switch (hw->mac.type) {
+- case e1000_i210:
+- case e1000_i211:
+- ret_val = igb_init_nvm_params_i210(hw);
+- break;
+- default:
+- break;
+- }
+-
+- if (ret_val)
+- goto out;
+-
+- /* if part supports SR-IOV then initialize mailbox parameters */
+- switch (mac->type) {
+- case e1000_82576:
+- case e1000_i350:
+- igb_init_mbx_params_pf(hw);
+- break;
+- default:
+- break;
+- }
+-
+- /* setup PHY parameters */
+- ret_val = igb_init_phy_params_82575(hw);
+-
+-out:
+- return ret_val;
+-}
+-
+-/**
+- * igb_acquire_phy_82575 - Acquire rights to access PHY
+- * @hw: pointer to the HW structure
+- *
+- * Acquire access rights to the correct PHY. This is a
+- * function pointer entry point called by the api module.
+- **/
+-static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
+-{
+- u16 mask = E1000_SWFW_PHY0_SM;
+-
+- if (hw->bus.func == E1000_FUNC_1)
+- mask = E1000_SWFW_PHY1_SM;
+- else if (hw->bus.func == E1000_FUNC_2)
+- mask = E1000_SWFW_PHY2_SM;
+- else if (hw->bus.func == E1000_FUNC_3)
+- mask = E1000_SWFW_PHY3_SM;
+-
+- return hw->mac.ops.acquire_swfw_sync(hw, mask);
+-}
+-
+-/**
+- * igb_release_phy_82575 - Release rights to access PHY
+- * @hw: pointer to the HW structure
+- *
+- * A wrapper to release access rights to the correct PHY. This is a
+- * function pointer entry point called by the api module.
+- **/
+-static void igb_release_phy_82575(struct e1000_hw *hw)
+-{
+- u16 mask = E1000_SWFW_PHY0_SM;
+-
+- if (hw->bus.func == E1000_FUNC_1)
+- mask = E1000_SWFW_PHY1_SM;
+- else if (hw->bus.func == E1000_FUNC_2)
+- mask = E1000_SWFW_PHY2_SM;
+- else if (hw->bus.func == E1000_FUNC_3)
+- mask = E1000_SWFW_PHY3_SM;
+-
+- hw->mac.ops.release_swfw_sync(hw, mask);
+-}
+-
+-/**
+- * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
+- * @hw: pointer to the HW structure
+- * @offset: register offset to be read
+- * @data: pointer to the read data
+- *
+- * Reads the PHY register at offset using the serial gigabit media independent
+- * interface and stores the retrieved information in data.
+- **/
+-static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+- u16 *data)
+-{
+- s32 ret_val = -E1000_ERR_PARAM;
+-
+- if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+- hw_dbg("PHY Address %u is out of range\n", offset);
+- goto out;
+- }
+-
+- ret_val = hw->phy.ops.acquire(hw);
+- if (ret_val)
+- goto out;
+-
+- ret_val = igb_read_phy_reg_i2c(hw, offset, data);
++ ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+@@ -755,7 +612,7 @@
+ }
+
+ /**
+- * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
++ * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+@@ -763,14 +620,15 @@
+ * Writes the data to PHY register at the offset using the serial gigabit
+ * media independent interface.
+ **/
+-static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
++static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 data)
+ {
+ s32 ret_val = -E1000_ERR_PARAM;
+
++ DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
+
+ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+- hw_dbg("PHY Address %d is out of range\n", offset);
++ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ goto out;
+ }
+
+@@ -778,7 +636,7 @@
+ if (ret_val)
+ goto out;
+
+- ret_val = igb_write_phy_reg_i2c(hw, offset, data);
++ ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+@@ -787,41 +645,44 @@
+ }
+
+ /**
+- * igb_get_phy_id_82575 - Retrieve PHY addr and id
++ * e1000_get_phy_id_82575 - Retrieve PHY addr and id
+ * @hw: pointer to the HW structure
+ *
+ * Retrieves the PHY address and ID for both PHY's which do and do not use
+ * sgmi interface.
+ **/
+-static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
++static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+- s32 ret_val = 0;
++ s32 ret_val = E1000_SUCCESS;
+ u16 phy_id;
+ u32 ctrl_ext;
+ u32 mdic;
+
+- /* Extra read required for some PHY's on i354 */
++ DEBUGFUNC("e1000_get_phy_id_82575");
++
++ /* some i354 devices need an extra read for phy id */
+ if (hw->mac.type == e1000_i354)
+- igb_get_phy_id(hw);
++ e1000_get_phy_id(hw);
+
+- /* For SGMII PHYs, we try the list of possible addresses until
++ /*
++ * For SGMII PHYs, we try the list of possible addresses until
+ * we find one that works. For non-SGMII PHYs
+ * (e.g. integrated copper PHYs), an address of 1 should
+ * work. The result of this function should mean phy->phy_addr
+ * and phy->id are set correctly.
+ */
+- if (!(igb_sgmii_active_82575(hw))) {
++ if (!e1000_sgmii_active_82575(hw)) {
+ phy->addr = 1;
+- ret_val = igb_get_phy_id(hw);
++ ret_val = e1000_get_phy_id(hw);
+ goto out;
+ }
+
+- if (igb_sgmii_uses_mdio_82575(hw)) {
++ if (e1000_sgmii_uses_mdio_82575(hw)) {
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+- mdic = rd32(E1000_MDIC);
++ mdic = E1000_READ_REG(hw, E1000_MDIC);
+ mdic &= E1000_MDIC_PHY_MASK;
+ phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
+ break;
+@@ -830,7 +691,7 @@
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+- mdic = rd32(E1000_MDICNFG);
++ mdic = E1000_READ_REG(hw, E1000_MDICNFG);
+ mdic &= E1000_MDICNFG_PHY_MASK;
+ phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
+ break;
+@@ -839,31 +700,35 @@
+ goto out;
+ break;
+ }
+- ret_val = igb_get_phy_id(hw);
++ ret_val = e1000_get_phy_id(hw);
+ goto out;
+ }
+
+ /* Power on sgmii phy if it is disabled */
+- ctrl_ext = rd32(E1000_CTRL_EXT);
+- wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
+- wrfl();
+- msleep(300);
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
++ ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
++ E1000_WRITE_FLUSH(hw);
++ msec_delay(300);
+
+- /* The address field in the I2CCMD register is 3 bits and 0 is invalid.
++ /*
++ * The address field in the I2CCMD register is 3 bits and 0 is invalid.
+ * Therefore, we need to test 1-7
+ */
+ for (phy->addr = 1; phy->addr < 8; phy->addr++) {
+- ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
+- if (ret_val == 0) {
+- hw_dbg("Vendor ID 0x%08X read at address %u\n",
+- phy_id, phy->addr);
+- /* At the time of this writing, The M88 part is
++ ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
++ if (ret_val == E1000_SUCCESS) {
++ DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
++ phy_id, phy->addr);
++ /*
++ * At the time of this writing, The M88 part is
+ * the only supported SGMII PHY product.
+ */
+ if (phy_id == M88_VENDOR)
+ break;
+ } else {
+- hw_dbg("PHY address %u was unreadable\n", phy->addr);
++ DEBUGOUT1("PHY address %u was unreadable\n",
++ phy->addr);
+ }
+ }
+
+@@ -871,49 +736,60 @@
+ if (phy->addr == 8) {
+ phy->addr = 0;
+ ret_val = -E1000_ERR_PHY;
+- goto out;
+ } else {
+- ret_val = igb_get_phy_id(hw);
++ ret_val = e1000_get_phy_id(hw);
+ }
+
+ /* restore previous sfp cage power state */
+- wr32(E1000_CTRL_EXT, ctrl_ext);
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ out:
+ return ret_val;
+ }
+
+ /**
+- * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
++ * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY using the serial gigabit media independent interface.
+ **/
+-static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
++static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
+ {
+- s32 ret_val;
++ s32 ret_val = E1000_SUCCESS;
++ struct e1000_phy_info *phy = &hw->phy;
+
+- /* This isn't a true "hard" reset, but is the only reset
++ DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
++
++ /*
++ * This isn't a true "hard" reset, but is the only reset
+ * available to us at this time.
+ */
+
+- hw_dbg("Soft resetting SGMII attached PHY...\n");
++ DEBUGOUT("Soft resetting SGMII attached PHY...\n");
++
++ if (!(hw->phy.ops.write_reg))
++ goto out;
+
+- /* SFP documentation requires the following to configure the SPF module
++ /*
++ * SFP documentation requires the following to configure the SPF module
+ * to work on SGMII. No further documentation is given.
+ */
+ ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
+ if (ret_val)
+ goto out;
+
+- ret_val = igb_phy_sw_reset(hw);
++ ret_val = hw->phy.ops.commit(hw);
++ if (ret_val)
++ goto out;
+
++ if (phy->id == M88E1512_E_PHY_ID)
++ ret_val = e1000_initialize_M88E1512_phy(hw);
+ out:
+ return ret_val;
+ }
+
+ /**
+- * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
++ * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+@@ -925,12 +801,17 @@
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+-static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
++static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+- s32 ret_val;
++ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
++ DEBUGFUNC("e1000_set_d0_lplu_state_82575");
++
++ if (!(hw->phy.ops.read_reg))
++ goto out;
++
+ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ goto out;
+@@ -938,47 +819,52 @@
+ if (active) {
+ data |= IGP02E1000_PM_D0_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+- data);
++ data);
+ if (ret_val)
+ goto out;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+- &data);
++ &data);
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+- data);
++ data);
+ if (ret_val)
+ goto out;
+ } else {
+ data &= ~IGP02E1000_PM_D0_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+- data);
+- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
++ data);
++ /*
++ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+- IGP01E1000_PHY_PORT_CONFIG, &data);
++ IGP01E1000_PHY_PORT_CONFIG,
++ &data);
+ if (ret_val)
+ goto out;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+- IGP01E1000_PHY_PORT_CONFIG, data);
++ IGP01E1000_PHY_PORT_CONFIG,
++ data);
+ if (ret_val)
+ goto out;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+- IGP01E1000_PHY_PORT_CONFIG, &data);
++ IGP01E1000_PHY_PORT_CONFIG,
++ &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+- IGP01E1000_PHY_PORT_CONFIG, data);
++ IGP01E1000_PHY_PORT_CONFIG,
++ data);
+ if (ret_val)
+ goto out;
+ }
+@@ -989,7 +875,7 @@
+ }
+
+ /**
+- * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
++ * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+@@ -1001,12 +887,14 @@
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+-static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
++static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+- u16 data;
++ u32 data;
+
+- data = rd32(E1000_82580_PHY_POWER_MGMT);
++ DEBUGFUNC("e1000_set_d0_lplu_state_82580");
++
++ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+
+ if (active) {
+ data |= E1000_82580_PM_D0_LPLU;
+@@ -1016,7 +904,8 @@
+ } else {
+ data &= ~E1000_82580_PM_D0_LPLU;
+
+- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
++ /*
++ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+@@ -1024,14 +913,15 @@
+ if (phy->smart_speed == e1000_smart_speed_on)
+ data |= E1000_82580_PM_SPD;
+ else if (phy->smart_speed == e1000_smart_speed_off)
+- data &= ~E1000_82580_PM_SPD; }
++ data &= ~E1000_82580_PM_SPD;
++ }
+
+- wr32(E1000_82580_PHY_POWER_MGMT, data);
+- return 0;
++ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
++ * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+@@ -1044,16 +934,19 @@
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+-static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
++s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+- u16 data;
++ u32 data;
+
+- data = rd32(E1000_82580_PHY_POWER_MGMT);
++ DEBUGFUNC("e1000_set_d3_lplu_state_82580");
++
++ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+
+ if (!active) {
+ data &= ~E1000_82580_PM_D3_LPLU;
+- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
++ /*
++ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+@@ -1070,12 +963,12 @@
+ data &= ~E1000_82580_PM_SPD;
+ }
+
+- wr32(E1000_82580_PHY_POWER_MGMT, data);
+- return 0;
++ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_acquire_nvm_82575 - Request for access to EEPROM
++ * e1000_acquire_nvm_82575 - Request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+@@ -1083,148 +976,183 @@
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+-static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
++static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
+ {
+- s32 ret_val;
++ s32 ret_val = E1000_SUCCESS;
+
+- ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
++ DEBUGFUNC("e1000_acquire_nvm_82575");
++
++ ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+ if (ret_val)
+ goto out;
+
+- ret_val = igb_acquire_nvm(hw);
++ /*
++ * Check if there is some access
++ * error this access may hook on
++ */
++ if (hw->mac.type == e1000_i350) {
++ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
++ if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
++ E1000_EECD_TIMEOUT)) {
++ /* Clear all access error flags */
++ E1000_WRITE_REG(hw, E1000_EECD, eecd |
++ E1000_EECD_ERROR_CLR);
++ DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
++ }
++ }
++
++ if (hw->mac.type == e1000_82580) {
++ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
++ if (eecd & E1000_EECD_BLOCKED) {
++ /* Clear access error flag */
++ E1000_WRITE_REG(hw, E1000_EECD, eecd |
++ E1000_EECD_BLOCKED);
++ DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
++ }
++ }
+
++ ret_val = e1000_acquire_nvm_generic(hw);
+ if (ret_val)
+- hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
++ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+
+ out:
+ return ret_val;
+ }
+
+ /**
+- * igb_release_nvm_82575 - Release exclusive access to EEPROM
++ * e1000_release_nvm_82575 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ **/
+-static void igb_release_nvm_82575(struct e1000_hw *hw)
++static void e1000_release_nvm_82575(struct e1000_hw *hw)
+ {
+- igb_release_nvm(hw);
+- hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
++ DEBUGFUNC("e1000_release_nvm_82575");
++
++ e1000_release_nvm_generic(hw);
++
++ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+ }
+
+ /**
+- * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
++ * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+-static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
++static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+ {
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+- s32 ret_val = 0;
+- s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
++ s32 ret_val = E1000_SUCCESS;
++ s32 i = 0, timeout = 200;
++
++ DEBUGFUNC("e1000_acquire_swfw_sync_82575");
+
+ while (i < timeout) {
+- if (igb_get_hw_semaphore(hw)) {
++ if (e1000_get_hw_semaphore_generic(hw)) {
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+- swfw_sync = rd32(E1000_SW_FW_SYNC);
++ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+- /* Firmware currently using resource (fwmask)
++ /*
++ * Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+- igb_put_hw_semaphore(hw);
+- mdelay(5);
++ e1000_put_hw_semaphore_generic(hw);
++ msec_delay_irq(5);
+ i++;
+ }
+
+ if (i == timeout) {
+- hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
++ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+- wr32(E1000_SW_FW_SYNC, swfw_sync);
++ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+- igb_put_hw_semaphore(hw);
++ e1000_put_hw_semaphore_generic(hw);
+
+ out:
+ return ret_val;
+ }
+
+ /**
+- * igb_release_swfw_sync_82575 - Release SW/FW semaphore
++ * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+-static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
++static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+ {
+ u32 swfw_sync;
+
+- while (igb_get_hw_semaphore(hw) != 0)
++ DEBUGFUNC("e1000_release_swfw_sync_82575");
++
++ while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
+ ; /* Empty */
+
+- swfw_sync = rd32(E1000_SW_FW_SYNC);
++ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+- wr32(E1000_SW_FW_SYNC, swfw_sync);
++ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+- igb_put_hw_semaphore(hw);
++ e1000_put_hw_semaphore_generic(hw);
+ }
+
+ /**
+- * igb_get_cfg_done_82575 - Read config done bit
++ * e1000_get_cfg_done_82575 - Read config done bit
+ * @hw: pointer to the HW structure
+ *
+ * Read the management control register for the config done bit for
+ * completion status. NOTE: silicon which is EEPROM-less will fail trying
+ * to read the config done bit, so an error is *ONLY* logged and returns
+- * 0. If we were to return with error, EEPROM-less silicon
++ * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon
+ * would not be able to be reset or change link.
+ **/
+-static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
++static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
+ {
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+- if (hw->bus.func == 1)
++ DEBUGFUNC("e1000_get_cfg_done_82575");
++
++ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_NVM_CFG_DONE_PORT_1;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_NVM_CFG_DONE_PORT_2;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_NVM_CFG_DONE_PORT_3;
+-
+ while (timeout) {
+- if (rd32(E1000_EEMNGCTL) & mask)
++ if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
+ break;
+- usleep_range(1000, 2000);
++ msec_delay(1);
+ timeout--;
+ }
+ if (!timeout)
+- hw_dbg("MNG configuration cycle has not completed.\n");
++ DEBUGOUT("MNG configuration cycle has not completed.\n");
+
+ /* If EEPROM is not marked present, init the PHY manually */
+- if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
++ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
+ (hw->phy.type == e1000_phy_igp_3))
+- igb_phy_init_script_igp3(hw);
++ e1000_phy_init_script_igp3(hw);
+
+- return 0;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_get_link_up_info_82575 - Get link speed/duplex info
++ * e1000_get_link_up_info_82575 - Get link speed/duplex info
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+@@ -1233,87 +1161,156 @@
+ * interface, use PCS to retrieve the link speed and duplex information.
+ * Otherwise, use the generic function to get the link speed and duplex info.
+ **/
+-static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
++static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+ {
+ s32 ret_val;
+
++ DEBUGFUNC("e1000_get_link_up_info_82575");
++
+ if (hw->phy.media_type != e1000_media_type_copper)
+- ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
++ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
+ duplex);
+ else
+- ret_val = igb_get_speed_and_duplex_copper(hw, speed,
++ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
+ duplex);
+
+ return ret_val;
+ }
+
+ /**
+- * igb_check_for_link_82575 - Check for link
++ * e1000_check_for_link_82575 - Check for link
+ * @hw: pointer to the HW structure
+ *
+ * If sgmii is enabled, then use the pcs register to determine link, otherwise
+ * use the generic interface for determining link.
+ **/
+-static s32 igb_check_for_link_82575(struct e1000_hw *hw)
++static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
+ {
+ s32 ret_val;
+ u16 speed, duplex;
+
++ DEBUGFUNC("e1000_check_for_link_82575");
++
+ if (hw->phy.media_type != e1000_media_type_copper) {
+- ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
+- &duplex);
+- /* Use this flag to determine if link needs to be checked or
+- * not. If we have link clear the flag so that we do not
++ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
++ &duplex);
++ /*
++ * Use this flag to determine if link needs to be checked or
++ * not. If we have link clear the flag so that we do not
+ * continue to check for link.
+ */
+ hw->mac.get_link_status = !hw->mac.serdes_has_link;
+
+- /* Configure Flow Control now that Auto-Neg has completed.
++ /*
++ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+- ret_val = igb_config_fc_after_link_up(hw);
++ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+- hw_dbg("Error configuring flow control\n");
++ DEBUGOUT("Error configuring flow control\n");
+ } else {
+- ret_val = igb_check_for_copper_link(hw);
++ ret_val = e1000_check_for_copper_link_generic(hw);
+ }
+
+ return ret_val;
+ }
+
+ /**
+- * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
++ * e1000_check_for_link_media_swap - Check which M88E1112 interface linked
++ * @hw: pointer to the HW structure
++ *
++ * Poll the M88E1112 interfaces to see which interface achieved link.
++ */
++static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 data;
++ u8 port = 0;
++
++ DEBUGFUNC("e1000_check_for_link_media_swap");
++
++ /* Check for copper. */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
++ if (ret_val)
++ return ret_val;
++
++ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
++ if (ret_val)
++ return ret_val;
++
++ if (data & E1000_M88E1112_STATUS_LINK)
++ port = E1000_MEDIA_PORT_COPPER;
++
++ /* Check for other. */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
++ if (ret_val)
++ return ret_val;
++
++ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
++ if (ret_val)
++ return ret_val;
++
++ if (data & E1000_M88E1112_STATUS_LINK)
++ port = E1000_MEDIA_PORT_OTHER;
++
++ /* Determine if a swap needs to happen. */
++ if (port && (hw->dev_spec._82575.media_port != port)) {
++ hw->dev_spec._82575.media_port = port;
++ hw->dev_spec._82575.media_changed = true;
++ }
++
++ if (port == E1000_MEDIA_PORT_COPPER) {
++ /* reset page to 0 */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
++ if (ret_val)
++ return ret_val;
++ e1000_check_for_link_82575(hw);
++ } else {
++ e1000_check_for_link_82575(hw);
++ /* reset page to 0 */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
++ if (ret_val)
++ return ret_val;
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
+ * @hw: pointer to the HW structure
+ **/
+-void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
++static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
+ {
+ u32 reg;
+
++ DEBUGFUNC("e1000_power_up_serdes_link_82575");
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+- !igb_sgmii_active_82575(hw))
++ !e1000_sgmii_active_82575(hw))
+ return;
+
+ /* Enable PCS to turn on link */
+- reg = rd32(E1000_PCS_CFG0);
++ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_PCS_EN;
+- wr32(E1000_PCS_CFG0, reg);
++ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+
+ /* Power up the laser */
+- reg = rd32(E1000_CTRL_EXT);
++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_SDP3_DATA;
+- wr32(E1000_CTRL_EXT, reg);
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* flush the write to verify completion */
+- wrfl();
+- usleep_range(1000, 2000);
++ E1000_WRITE_FLUSH(hw);
++ msec_delay(1);
+ }
+
+ /**
+- * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
++ * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+@@ -1321,28 +1318,26 @@
+ * Using the physical coding sub-layer (PCS), retrieve the current speed and
+ * duplex, then store the values in the pointers provided.
+ **/
+-static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
+- u16 *duplex)
++static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
++ u16 *speed, u16 *duplex)
+ {
+ struct e1000_mac_info *mac = &hw->mac;
+- u32 pcs, status;
++ u32 pcs;
++ u32 status;
+
+- /* Set up defaults for the return values of this function */
+- mac->serdes_has_link = false;
+- *speed = 0;
+- *duplex = 0;
++ DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
+
+- /* Read the PCS Status register for link state. For non-copper mode,
++ /*
++ * Read the PCS Status register for link state. For non-copper mode,
+ * the status register is not accurate. The PCS status register is
+ * used instead.
+ */
+- pcs = rd32(E1000_PCS_LSTAT);
++ pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+
+- /* The link up bit determines when link is up on autoneg. The sync ok
+- * gets set once both sides sync up and agree upon link. Stable link
+- * can be determined by checking for both link up and link sync ok
++ /*
++ * The link up bit determines when link is up on autoneg.
+ */
+- if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
++ if (pcs & E1000_PCS_LSTS_LINK_OK) {
+ mac->serdes_has_link = true;
+
+ /* Detect and store PCS speed */
+@@ -1359,192 +1354,202 @@
+ else
+ *duplex = HALF_DUPLEX;
+
+- /* Check if it is an I354 2.5Gb backplane connection. */
++ /* Check if it is an I354 2.5Gb backplane connection. */
+ if (mac->type == e1000_i354) {
+- status = rd32(E1000_STATUS);
++ status = E1000_READ_REG(hw, E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ *speed = SPEED_2500;
+ *duplex = FULL_DUPLEX;
+- hw_dbg("2500 Mbs, ");
+- hw_dbg("Full Duplex\n");
++ DEBUGOUT("2500 Mbs, ");
++ DEBUGOUT("Full Duplex\n");
+ }
+ }
+
++ } else {
++ mac->serdes_has_link = false;
++ *speed = 0;
++ *duplex = 0;
+ }
+
+- return 0;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_shutdown_serdes_link_82575 - Remove link during power down
++ * e1000_shutdown_serdes_link_82575 - Remove link during power down
+ * @hw: pointer to the HW structure
+ *
+- * In the case of fiber serdes, shut down optics and PCS on driver unload
++ * In the case of serdes shut down sfp and PCS on driver unload
+ * when management pass thru is not enabled.
+ **/
+-void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
++void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
+ {
+ u32 reg;
+
+- if (hw->phy.media_type != e1000_media_type_internal_serdes &&
+- igb_sgmii_active_82575(hw))
++ DEBUGFUNC("e1000_shutdown_serdes_link_82575");
++
++ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
++ !e1000_sgmii_active_82575(hw))
+ return;
+
+- if (!igb_enable_mng_pass_thru(hw)) {
++ if (!igb_e1000_enable_mng_pass_thru(hw)) {
+ /* Disable PCS to turn off link */
+- reg = rd32(E1000_PCS_CFG0);
++ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ reg &= ~E1000_PCS_CFG_PCS_EN;
+- wr32(E1000_PCS_CFG0, reg);
++ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+
+ /* shutdown the laser */
+- reg = rd32(E1000_CTRL_EXT);
++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg |= E1000_CTRL_EXT_SDP3_DATA;
+- wr32(E1000_CTRL_EXT, reg);
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* flush the write to verify completion */
+- wrfl();
+- usleep_range(1000, 2000);
++ E1000_WRITE_FLUSH(hw);
++ msec_delay(1);
+ }
++
++ return;
+ }
+
+ /**
+- * igb_reset_hw_82575 - Reset hardware
++ * e1000_reset_hw_82575 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+- * This resets the hardware into a known state. This is a
+- * function pointer entry point called by the api module.
++ * This resets the hardware into a known state.
+ **/
+-static s32 igb_reset_hw_82575(struct e1000_hw *hw)
++static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
+ {
+ u32 ctrl;
+ s32 ret_val;
+
+- /* Prevent the PCI-E bus from sticking if there is no TLP connection
++ DEBUGFUNC("e1000_reset_hw_82575");
++
++ /*
++ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+- ret_val = igb_disable_pcie_master(hw);
++ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+- hw_dbg("PCI-E Master disable polling has failed.\n");
++ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ /* set the completion timeout for interface */
+- ret_val = igb_set_pcie_completion_timeout(hw);
++ ret_val = e1000_set_pcie_completion_timeout(hw);
+ if (ret_val)
+- hw_dbg("PCI-E Set completion timeout has failed.\n");
++ DEBUGOUT("PCI-E Set completion timeout has failed.\n");
+
+- hw_dbg("Masking off all interrupts\n");
+- wr32(E1000_IMC, 0xffffffff);
++ DEBUGOUT("Masking off all interrupts\n");
++ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+- wr32(E1000_RCTL, 0);
+- wr32(E1000_TCTL, E1000_TCTL_PSP);
+- wrfl();
++ E1000_WRITE_REG(hw, E1000_RCTL, 0);
++ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
++ E1000_WRITE_FLUSH(hw);
+
+- usleep_range(10000, 20000);
++ msec_delay(10);
+
+- ctrl = rd32(E1000_CTRL);
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+- hw_dbg("Issuing a global reset to MAC\n");
+- wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
++ DEBUGOUT("Issuing a global reset to MAC\n");
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+- ret_val = igb_get_auto_rd_done(hw);
++ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+- /* When auto config read does not complete, do not
++ /*
++ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+- hw_dbg("Auto Read Done did not complete\n");
++ DEBUGOUT("Auto Read Done did not complete\n");
+ }
+
+ /* If EEPROM is not present, run manual init scripts */
+- if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
+- igb_reset_init_script_82575(hw);
++ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES))
++ e1000_reset_init_script_82575(hw);
+
+ /* Clear any pending interrupt events. */
+- wr32(E1000_IMC, 0xffffffff);
+- rd32(E1000_ICR);
++ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
++ E1000_READ_REG(hw, E1000_ICR);
+
+ /* Install any alternate MAC address into RAR0 */
+- ret_val = igb_check_alt_mac_addr(hw);
++ ret_val = igb_e1000_check_alt_mac_addr_generic(hw);
+
+ return ret_val;
+ }
+
+ /**
+- * igb_init_hw_82575 - Initialize hardware
++ * e1000_init_hw_82575 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+-static s32 igb_init_hw_82575(struct e1000_hw *hw)
++s32 e1000_init_hw_82575(struct e1000_hw *hw)
+ {
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ u16 i, rar_count = mac->rar_entry_count;
+
+- if ((hw->mac.type >= e1000_i210) &&
+- !(igb_get_flash_presence_i210(hw))) {
+- ret_val = igb_pll_workaround_i210(hw);
+- if (ret_val)
+- return ret_val;
+- }
++ DEBUGFUNC("e1000_init_hw_82575");
+
+ /* Initialize identification LED */
+- ret_val = igb_id_led_init(hw);
++ ret_val = mac->ops.id_led_init(hw);
+ if (ret_val) {
+- hw_dbg("Error initializing identification LED\n");
++ DEBUGOUT("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
+ }
+
+ /* Disabling VLAN filtering */
+- hw_dbg("Initializing the IEEE VLAN\n");
+- if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
+- igb_clear_vfta_i350(hw);
+- else
+- igb_clear_vfta(hw);
++ DEBUGOUT("Initializing the IEEE VLAN\n");
++ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address */
+- igb_init_rx_addrs(hw, rar_count);
++ e1000_init_rx_addrs_generic(hw, rar_count);
+
+ /* Zero out the Multicast HASH table */
+- hw_dbg("Zeroing the MTA\n");
++ DEBUGOUT("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+- array_wr32(E1000_MTA, i, 0);
++ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* Zero out the Unicast HASH table */
+- hw_dbg("Zeroing the UTA\n");
++ DEBUGOUT("Zeroing the UTA\n");
+ for (i = 0; i < mac->uta_reg_count; i++)
+- array_wr32(E1000_UTA, i, 0);
++ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
+
+ /* Setup link and flow control */
+- ret_val = igb_setup_link(hw);
++ ret_val = mac->ops.setup_link(hw);
+
+- /* Clear all of the statistics registers (clear on read). It is
++ /* Set the default MTU size */
++ hw->dev_spec._82575.mtu = 1500;
++
++ /*
++ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+- igb_clear_hw_cntrs_82575(hw);
++ e1000_clear_hw_cntrs_82575(hw);
++
+ return ret_val;
+ }
+
+ /**
+- * igb_setup_copper_link_82575 - Configure copper link settings
++ * e1000_setup_copper_link_82575 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. Then we check
+ * for link, once link is established calls to configure collision distance
+ * and flow control are called.
+ **/
+-static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
++static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
+ {
+ u32 ctrl;
+- s32 ret_val;
++ s32 ret_val;
+ u32 phpm_reg;
+
+- ctrl = rd32(E1000_CTRL);
++ DEBUGFUNC("e1000_setup_copper_link_82575");
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+- wr32(E1000_CTRL, ctrl);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Clear Go Link Disconnect bit on supported devices */
+ switch (hw->mac.type) {
+@@ -1552,25 +1557,25 @@
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+- phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
++ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+- wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
++ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ break;
+ default:
+ break;
+ }
+
+- ret_val = igb_setup_serdes_link_82575(hw);
++ ret_val = e1000_setup_serdes_link_82575(hw);
+ if (ret_val)
+ goto out;
+
+- if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
++ if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
+ /* allow time for SFP cage time to power up phy */
+- msleep(300);
++ msec_delay(300);
+
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+- hw_dbg("Error resetting the PHY.\n");
++ DEBUGOUT("Error resetting the PHY.\n");
+ goto out;
+ }
+ }
+@@ -1580,20 +1585,22 @@
+ switch (hw->phy.id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
++ case M88E1340M_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
++ case M88E1512_E_PHY_ID:
+ case I210_I_PHY_ID:
+- ret_val = igb_copper_link_setup_m88_gen2(hw);
++ ret_val = e1000_copper_link_setup_m88_gen2(hw);
+ break;
+ default:
+- ret_val = igb_copper_link_setup_m88(hw);
++ ret_val = e1000_copper_link_setup_m88(hw);
+ break;
+ }
+ break;
+ case e1000_phy_igp_3:
+- ret_val = igb_copper_link_setup_igp(hw);
++ ret_val = e1000_copper_link_setup_igp(hw);
+ break;
+ case e1000_phy_82580:
+- ret_val = igb_copper_link_setup_82580(hw);
++ ret_val = igb_e1000_copper_link_setup_82577(hw);
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+@@ -1603,13 +1610,13 @@
+ if (ret_val)
+ goto out;
+
+- ret_val = igb_setup_copper_link(hw);
++ ret_val = e1000_setup_copper_link_generic(hw);
+ out:
+ return ret_val;
+ }
+
+ /**
+- * igb_setup_serdes_link_82575 - Setup link for serdes
++ * e1000_setup_serdes_link_82575 - Setup link for serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configure the physical coding sub-layer (PCS) link. The PCS link is
+@@ -1617,45 +1624,40 @@
+ * interface (sgmii), or serdes fiber is being used. Configures the link
+ * for auto-negotiation or forces speed/duplex.
+ **/
+-static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
++static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
+ {
+ u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
+ bool pcs_autoneg;
+- s32 ret_val = 0;
++ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
++ DEBUGFUNC("e1000_setup_serdes_link_82575");
++
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+- !igb_sgmii_active_82575(hw))
++ !e1000_sgmii_active_82575(hw))
+ return ret_val;
+
+-
+- /* On the 82575, SerDes loopback mode persists until it is
++ /*
++ * On the 82575, SerDes loopback mode persists until it is
+ * explicitly turned off or a power cycle is performed. A read to
+ * the register does not indicate its status. Therefore, we ensure
+ * loopback mode is disabled during initialization.
+ */
+- wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
++ E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+
+- /* power on the sfp cage if present and turn on I2C */
+- ctrl_ext = rd32(E1000_CTRL_EXT);
++ /* power on the sfp cage if present */
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+- ctrl_ext |= E1000_CTRL_I2C_ENA;
+- wr32(E1000_CTRL_EXT, ctrl_ext);
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+- ctrl_reg = rd32(E1000_CTRL);
++ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl_reg |= E1000_CTRL_SLU;
+
+- if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
+- /* set both sw defined pins */
++ /* set both sw defined pins on 82575/82576*/
++ if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
+ ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
+
+- /* Set switch control to serdes energy detect */
+- reg = rd32(E1000_CONNSW);
+- reg |= E1000_CONNSW_ENRGSRC;
+- wr32(E1000_CONNSW, reg);
+- }
+-
+- reg = rd32(E1000_PCS_LCTL);
++ reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+
+ /* default pcs_autoneg to the same setting as mac autoneg */
+ pcs_autoneg = hw->mac.autoneg;
+@@ -1670,12 +1672,13 @@
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ /* disable PCS autoneg and support parallel detect only */
+ pcs_autoneg = false;
++ /* fall through to default case */
+ default:
+ if (hw->mac.type == e1000_82575 ||
+ hw->mac.type == e1000_82576) {
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
+ if (ret_val) {
+- hw_dbg(KERN_DEBUG "NVM Read Error\n\n");
++ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+@@ -1683,27 +1686,29 @@
+ pcs_autoneg = false;
+ }
+
+- /* non-SGMII modes only supports a speed of 1000/Full for the
++ /*
++ * non-SGMII modes only supports a speed of 1000/Full for the
+ * link so it is best to just force the MAC and let the pcs
+ * link either autoneg or be forced to 1000/Full
+ */
+ ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
+- E1000_CTRL_FD | E1000_CTRL_FRCDPX;
++ E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+
+ /* set speed of 1000/Full if speed/duplex is forced */
+ reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
+ break;
+ }
+
+- wr32(E1000_CTRL, ctrl_reg);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
+
+- /* New SerDes mode allows for forcing speed or autonegotiating speed
++ /*
++ * New SerDes mode allows for forcing speed or autonegotiating speed
+ * at 1gb. Autoneg should be default set by most drivers. This is the
+ * mode that will be compatible with older link partners and switches.
+ * However, both are supported by the hardware and some drivers/tools.
+ */
+ reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
+- E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
++ E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
+
+ if (pcs_autoneg) {
+ /* Set PCS register for autoneg */
+@@ -1714,8 +1719,9 @@
+ reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
+
+ /* Configure flow control advertisement for autoneg */
+- anadv_reg = rd32(E1000_PCS_ANADV);
++ anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
++
+ switch (hw->fc.requested_mode) {
+ case e1000_fc_full:
+ case e1000_fc_rx_pause:
+@@ -1728,251 +1734,480 @@
+ default:
+ break;
+ }
+- wr32(E1000_PCS_ANADV, anadv_reg);
+
+- hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
++ E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg);
++
++ DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
+ } else {
+ /* Set PCS register for forced link */
+- reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
++ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
+
+ /* Force flow control for forced link */
+ reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+
+- hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
++ DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
+ }
+
+- wr32(E1000_PCS_LCTL, reg);
++ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
+
+- if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
+- igb_force_mac_fc(hw);
++ if (!pcs_autoneg && !e1000_sgmii_active_82575(hw))
++ e1000_force_mac_fc_generic(hw);
+
+ return ret_val;
+ }
+
+ /**
+- * igb_sgmii_active_82575 - Return sgmii state
++ * e1000_get_media_type_82575 - derives current media type.
+ * @hw: pointer to the HW structure
+ *
+- * 82575 silicon has a serialized gigabit media independent interface (sgmii)
+- * which can be enabled for use in the embedded applications. Simply
+- * return the current state of the sgmii interface.
++ * The media type is chosen reflecting few settings.
++ * The following are taken into account:
++ * - link mode set in the current port Init Control Word #3
++ * - current link mode settings in CSR register
++ * - MDIO vs. I2C PHY control interface chosen
++ * - SFP module media type
+ **/
+-static bool igb_sgmii_active_82575(struct e1000_hw *hw)
++static s32 e1000_get_media_type_82575(struct e1000_hw *hw)
+ {
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+- return dev_spec->sgmii_active;
+-}
+-
+-/**
+- * igb_reset_init_script_82575 - Inits HW defaults after reset
+- * @hw: pointer to the HW structure
+- *
+- * Inits recommended HW defaults after a reset when there is no EEPROM
+- * detected. This is only for the 82575.
+- **/
+-static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
+-{
+- if (hw->mac.type == e1000_82575) {
+- hw_dbg("Running reset init script for 82575\n");
+- /* SerDes configuration via SERDESCTRL */
+- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
+- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
+- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
+- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
++ s32 ret_val = E1000_SUCCESS;
++ u32 ctrl_ext = 0;
++ u32 link_mode = 0;
+
+- /* CCM configuration via CCMCTL register */
+- igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
+- igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
++ /* Set internal phy as default */
++ dev_spec->sgmii_active = false;
++ dev_spec->module_plugged = false;
+
+- /* PCIe lanes configuration */
+- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
+- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
+- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
+- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
++ /* Get CSR setting */
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+- /* PCIe PLL Configuration */
+- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
+- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
+- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
+- }
++ /* extract link mode setting */
++ link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
+
+- return 0;
+-}
++ switch (link_mode) {
++ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
++ hw->phy.media_type = e1000_media_type_internal_serdes;
++ break;
++ case E1000_CTRL_EXT_LINK_MODE_GMII:
++ hw->phy.media_type = e1000_media_type_copper;
++ break;
++ case E1000_CTRL_EXT_LINK_MODE_SGMII:
++ /* Get phy control interface type set (MDIO vs. I2C)*/
++ if (e1000_sgmii_uses_mdio_82575(hw)) {
++ hw->phy.media_type = e1000_media_type_copper;
++ dev_spec->sgmii_active = true;
++ break;
++ }
++ /* fall through for I2C based SGMII */
++ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
++ /* read media type from SFP EEPROM */
++ ret_val = e1000_set_sfp_media_type_82575(hw);
++ if ((ret_val != E1000_SUCCESS) ||
++ (hw->phy.media_type == e1000_media_type_unknown)) {
++ /*
++ * If media type was not identified then return media
++ * type defined by the CTRL_EXT settings.
++ */
++ hw->phy.media_type = e1000_media_type_internal_serdes;
+
+-/**
+- * igb_read_mac_addr_82575 - Read device MAC address
+- * @hw: pointer to the HW structure
+- **/
+-static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
+-{
+- s32 ret_val = 0;
++ if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
++ hw->phy.media_type = e1000_media_type_copper;
++ dev_spec->sgmii_active = true;
++ }
+
+- /* If there's an alternate MAC address place it in RAR0
+- * so that it will override the Si installed default perm
+- * address.
+- */
+- ret_val = igb_check_alt_mac_addr(hw);
+- if (ret_val)
+- goto out;
++ break;
++ }
+
+- ret_val = igb_read_mac_addr(hw);
++ /* do not change link mode for 100BaseFX */
++ if (dev_spec->eth_flags.e100_base_fx)
++ break;
++
++ /* change current link mode setting */
++ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
++
++ if (hw->phy.media_type == e1000_media_type_copper)
++ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
++ else
++ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
++
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
++
++ break;
++ }
++
++ return ret_val;
++}
++
++/**
++ * e1000_set_sfp_media_type_82575 - derives SFP module media type.
++ * @hw: pointer to the HW structure
++ *
++ * The media type is chosen based on SFP module.
++ * compatibility flags retrieved from SFP ID EEPROM.
++ **/
++static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_ERR_CONFIG;
++ u32 ctrl_ext = 0;
++ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
++ struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags;
++ u8 tranceiver_type = 0;
++ s32 timeout = 3;
++
++ /* Turn I2C interface ON and power on sfp cage */
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
++
++ E1000_WRITE_FLUSH(hw);
++
++ /* Read SFP module data */
++ while (timeout) {
++ ret_val = e1000_read_sfp_data_byte(hw,
++ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
++ &tranceiver_type);
++ if (ret_val == E1000_SUCCESS)
++ break;
++ msec_delay(100);
++ timeout--;
++ }
++ if (ret_val != E1000_SUCCESS)
++ goto out;
++
++ ret_val = e1000_read_sfp_data_byte(hw,
++ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
++ (u8 *)eth_flags);
++ if (ret_val != E1000_SUCCESS)
++ goto out;
++
++ /* Check if there is some SFP module plugged and powered */
++ if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
++ (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
++ dev_spec->module_plugged = true;
++ if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
++ hw->phy.media_type = e1000_media_type_internal_serdes;
++ } else if (eth_flags->e100_base_fx) {
++ dev_spec->sgmii_active = true;
++ hw->phy.media_type = e1000_media_type_internal_serdes;
++ } else if (eth_flags->e1000_base_t) {
++ dev_spec->sgmii_active = true;
++ hw->phy.media_type = e1000_media_type_copper;
++ } else {
++ hw->phy.media_type = e1000_media_type_unknown;
++ DEBUGOUT("PHY module has not been recognized\n");
++ goto out;
++ }
++ } else {
++ hw->phy.media_type = e1000_media_type_unknown;
++ }
++ ret_val = E1000_SUCCESS;
++out:
++ /* Restore I2C interface setting */
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
++ return ret_val;
++}
++
++/**
++ * e1000_valid_led_default_82575 - Verify a valid default LED config
++ * @hw: pointer to the HW structure
++ * @data: pointer to the NVM (EEPROM)
++ *
++ * Read the EEPROM for the current default LED configuration. If the
++ * LED configuration is not valid, set to a valid LED configuration.
++ **/
++static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
++{
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_valid_led_default_82575");
++
++ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ goto out;
++ }
++
++ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
++ switch (hw->phy.media_type) {
++ case e1000_media_type_internal_serdes:
++ *data = ID_LED_DEFAULT_82575_SERDES;
++ break;
++ case e1000_media_type_copper:
++ default:
++ *data = ID_LED_DEFAULT;
++ break;
++ }
++ }
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_sgmii_active_82575 - Return sgmii state
++ * @hw: pointer to the HW structure
++ *
++ * 82575 silicon has a serialized gigabit media independent interface (sgmii)
++ * which can be enabled for use in the embedded applications. Simply
++ * return the current state of the sgmii interface.
++ **/
++static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
++{
++ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
++ return dev_spec->sgmii_active;
++}
++
++/**
++ * e1000_reset_init_script_82575 - Inits HW defaults after reset
++ * @hw: pointer to the HW structure
++ *
++ * Inits recommended HW defaults after a reset when there is no EEPROM
++ * detected. This is only for the 82575.
++ **/
++static s32 e1000_reset_init_script_82575(struct e1000_hw *hw)
++{
++ DEBUGFUNC("e1000_reset_init_script_82575");
++
++ if (hw->mac.type == e1000_82575) {
++ DEBUGOUT("Running reset init script for 82575\n");
++ /* SerDes configuration via SERDESCTRL */
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);
++
++ /* CCM configuration via CCMCTL register */
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);
++
++ /* PCIe lanes configuration */
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);
++
++ /* PCIe PLL Configuration */
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
++ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_read_mac_addr_82575 - Read device MAC address
++ * @hw: pointer to the HW structure
++ **/
++static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
++{
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_read_mac_addr_82575");
++
++ /*
++ * If there's an alternate MAC address place it in RAR0
++ * so that it will override the Si installed default perm
++ * address.
++ */
++ ret_val = igb_e1000_check_alt_mac_addr_generic(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = igb_e1000_read_mac_addr_generic(hw);
+
+ out:
+ return ret_val;
+ }
+
+ /**
+- * igb_power_down_phy_copper_82575 - Remove link during PHY power down
++ * e1000_config_collision_dist_82575 - Configure collision distance
++ * @hw: pointer to the HW structure
++ *
++ * Configures the collision distance to the default value and is used
++ * during link setup.
++ **/
++static void e1000_config_collision_dist_82575(struct e1000_hw *hw)
++{
++ u32 tctl_ext;
++
++ DEBUGFUNC("e1000_config_collision_dist_82575");
++
++ tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT);
++
++ tctl_ext &= ~E1000_TCTL_EXT_COLD;
++ tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT;
++
++ E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext);
++ E1000_WRITE_FLUSH(hw);
++}
++
++/**
++ * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, remove the link.
+ **/
+-void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
++static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
+ {
++ struct e1000_phy_info *phy = &hw->phy;
++
++ if (!(phy->ops.check_reset_block))
++ return;
++
+ /* If the management interface is not enabled, then power down */
+- if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
+- igb_power_down_phy_copper(hw);
++ if (!(igb_e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
++ igb_e1000_power_down_phy_copper(hw);
++
++ return;
+ }
+
+ /**
+- * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
++ * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+-static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
++static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
+ {
+- igb_clear_hw_cntrs_base(hw);
++ DEBUGFUNC("e1000_clear_hw_cntrs_82575");
+
+- rd32(E1000_PRC64);
+- rd32(E1000_PRC127);
+- rd32(E1000_PRC255);
+- rd32(E1000_PRC511);
+- rd32(E1000_PRC1023);
+- rd32(E1000_PRC1522);
+- rd32(E1000_PTC64);
+- rd32(E1000_PTC127);
+- rd32(E1000_PTC255);
+- rd32(E1000_PTC511);
+- rd32(E1000_PTC1023);
+- rd32(E1000_PTC1522);
+-
+- rd32(E1000_ALGNERRC);
+- rd32(E1000_RXERRC);
+- rd32(E1000_TNCRS);
+- rd32(E1000_CEXTERR);
+- rd32(E1000_TSCTC);
+- rd32(E1000_TSCTFC);
+-
+- rd32(E1000_MGTPRC);
+- rd32(E1000_MGTPDC);
+- rd32(E1000_MGTPTC);
+-
+- rd32(E1000_IAC);
+- rd32(E1000_ICRXOC);
+-
+- rd32(E1000_ICRXPTC);
+- rd32(E1000_ICRXATC);
+- rd32(E1000_ICTXPTC);
+- rd32(E1000_ICTXATC);
+- rd32(E1000_ICTXQEC);
+- rd32(E1000_ICTXQMTC);
+- rd32(E1000_ICRXDMTC);
+-
+- rd32(E1000_CBTMPC);
+- rd32(E1000_HTDPMC);
+- rd32(E1000_CBRMPC);
+- rd32(E1000_RPTHC);
+- rd32(E1000_HGPTC);
+- rd32(E1000_HTCBDPC);
+- rd32(E1000_HGORCL);
+- rd32(E1000_HGORCH);
+- rd32(E1000_HGOTCL);
+- rd32(E1000_HGOTCH);
+- rd32(E1000_LENERRS);
++ e1000_clear_hw_cntrs_base_generic(hw);
++
++ E1000_READ_REG(hw, E1000_PRC64);
++ E1000_READ_REG(hw, E1000_PRC127);
++ E1000_READ_REG(hw, E1000_PRC255);
++ E1000_READ_REG(hw, E1000_PRC511);
++ E1000_READ_REG(hw, E1000_PRC1023);
++ E1000_READ_REG(hw, E1000_PRC1522);
++ E1000_READ_REG(hw, E1000_PTC64);
++ E1000_READ_REG(hw, E1000_PTC127);
++ E1000_READ_REG(hw, E1000_PTC255);
++ E1000_READ_REG(hw, E1000_PTC511);
++ E1000_READ_REG(hw, E1000_PTC1023);
++ E1000_READ_REG(hw, E1000_PTC1522);
++
++ E1000_READ_REG(hw, E1000_ALGNERRC);
++ E1000_READ_REG(hw, E1000_RXERRC);
++ E1000_READ_REG(hw, E1000_TNCRS);
++ E1000_READ_REG(hw, E1000_CEXTERR);
++ E1000_READ_REG(hw, E1000_TSCTC);
++ E1000_READ_REG(hw, E1000_TSCTFC);
++
++ E1000_READ_REG(hw, E1000_MGTPRC);
++ E1000_READ_REG(hw, E1000_MGTPDC);
++ E1000_READ_REG(hw, E1000_MGTPTC);
++
++ E1000_READ_REG(hw, E1000_IAC);
++ E1000_READ_REG(hw, E1000_ICRXOC);
++
++ E1000_READ_REG(hw, E1000_ICRXPTC);
++ E1000_READ_REG(hw, E1000_ICRXATC);
++ E1000_READ_REG(hw, E1000_ICTXPTC);
++ E1000_READ_REG(hw, E1000_ICTXATC);
++ E1000_READ_REG(hw, E1000_ICTXQEC);
++ E1000_READ_REG(hw, E1000_ICTXQMTC);
++ E1000_READ_REG(hw, E1000_ICRXDMTC);
++
++ E1000_READ_REG(hw, E1000_CBTMPC);
++ E1000_READ_REG(hw, E1000_HTDPMC);
++ E1000_READ_REG(hw, E1000_CBRMPC);
++ E1000_READ_REG(hw, E1000_RPTHC);
++ E1000_READ_REG(hw, E1000_HGPTC);
++ E1000_READ_REG(hw, E1000_HTCBDPC);
++ E1000_READ_REG(hw, E1000_HGORCL);
++ E1000_READ_REG(hw, E1000_HGORCH);
++ E1000_READ_REG(hw, E1000_HGOTCL);
++ E1000_READ_REG(hw, E1000_HGOTCH);
++ E1000_READ_REG(hw, E1000_LENERRS);
+
+ /* This register should not be read in copper configurations */
+- if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+- igb_sgmii_active_82575(hw))
+- rd32(E1000_SCVPC);
++ if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
++ e1000_sgmii_active_82575(hw))
++ E1000_READ_REG(hw, E1000_SCVPC);
+ }
+
+ /**
+- * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
++ * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable
+ * @hw: pointer to the HW structure
+ *
+- * After rx enable if managability is enabled then there is likely some
++ * After Rx enable, if manageability is enabled then there is likely some
+ * bad data at the start of the fifo and possibly in the DMA fifo. This
+ * function clears the fifos and flushes any packets that came in as rx was
+ * being enabled.
+ **/
+-void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
++void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
+ {
+ u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
+ int i, ms_wait;
+
++ DEBUGFUNC("e1000_rx_fifo_flush_82575");
++
++ /* disable IPv6 options as per hardware errata */
++ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
++ rfctl |= E1000_RFCTL_IPV6_EX_DIS;
++ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
++
+ if (hw->mac.type != e1000_82575 ||
+- !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
++ !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
+ return;
+
+- /* Disable all RX queues */
++ /* Disable all Rx queues */
+ for (i = 0; i < 4; i++) {
+- rxdctl[i] = rd32(E1000_RXDCTL(i));
+- wr32(E1000_RXDCTL(i),
+- rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
++ rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
++ E1000_WRITE_REG(hw, E1000_RXDCTL(i),
++ rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
+ }
+ /* Poll all queues to verify they have shut down */
+ for (ms_wait = 0; ms_wait < 10; ms_wait++) {
+- usleep_range(1000, 2000);
++ msec_delay(1);
+ rx_enabled = 0;
+ for (i = 0; i < 4; i++)
+- rx_enabled |= rd32(E1000_RXDCTL(i));
++ rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
+ if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
+ break;
+ }
+
+ if (ms_wait == 10)
+- hw_dbg("Queue disable timed out after 10ms\n");
++ DEBUGOUT("Queue disable timed out after 10ms\n");
+
+ /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
+ * incoming packets are rejected. Set enable and wait 2ms so that
+ * any packet that was coming in as RCTL.EN was set is flushed
+ */
+- rfctl = rd32(E1000_RFCTL);
+- wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
++ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
+
+- rlpml = rd32(E1000_RLPML);
+- wr32(E1000_RLPML, 0);
++ rlpml = E1000_READ_REG(hw, E1000_RLPML);
++ E1000_WRITE_REG(hw, E1000_RLPML, 0);
+
+- rctl = rd32(E1000_RCTL);
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
+ temp_rctl |= E1000_RCTL_LPE;
+
+- wr32(E1000_RCTL, temp_rctl);
+- wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
+- wrfl();
+- usleep_range(2000, 3000);
++ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
++ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
++ E1000_WRITE_FLUSH(hw);
++ msec_delay(2);
+
+- /* Enable RX queues that were previously enabled and restore our
++ /* Enable Rx queues that were previously enabled and restore our
+ * previous state
+ */
+ for (i = 0; i < 4; i++)
+- wr32(E1000_RXDCTL(i), rxdctl[i]);
+- wr32(E1000_RCTL, rctl);
+- wrfl();
++ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
++ E1000_WRITE_FLUSH(hw);
+
+- wr32(E1000_RLPML, rlpml);
+- wr32(E1000_RFCTL, rfctl);
++ E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
++ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+
+ /* Flush receive errors generated by workaround */
+- rd32(E1000_ROC);
+- rd32(E1000_RNBC);
+- rd32(E1000_MPC);
++ E1000_READ_REG(hw, E1000_ROC);
++ E1000_READ_REG(hw, E1000_RNBC);
++ E1000_READ_REG(hw, E1000_MPC);
+ }
+
+ /**
+- * igb_set_pcie_completion_timeout - set pci-e completion timeout
++ * e1000_set_pcie_completion_timeout - set pci-e completion timeout
+ * @hw: pointer to the HW structure
+ *
+ * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
+@@ -1981,17 +2216,18 @@
+ * increase the value to either 10ms to 200ms for capability version 1 config,
+ * or 16ms to 55ms for version 2.
+ **/
+-static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
++static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
+ {
+- u32 gcr = rd32(E1000_GCR);
+- s32 ret_val = 0;
++ u32 gcr = E1000_READ_REG(hw, E1000_GCR);
++ s32 ret_val = E1000_SUCCESS;
+ u16 pcie_devctl2;
+
+ /* only take action if timeout value is defaulted to 0 */
+ if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
+ goto out;
+
+- /* if capabilities version is type 1 we can write the
++ /*
++ * if capababilities version is type 1 we can write the
+ * timeout of 10ms to 200ms through the GCR register
+ */
+ if (!(gcr & E1000_GCR_CAP_VER2)) {
+@@ -1999,36 +2235,37 @@
+ goto out;
+ }
+
+- /* for version 2 capabilities we need to write the config space
++ /*
++ * for version 2 capabilities we need to write the config space
+ * directly in order to set the completion timeout value for
+ * 16ms to 55ms
+ */
+- ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+- &pcie_devctl2);
++ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
++ &pcie_devctl2);
+ if (ret_val)
+ goto out;
+
+ pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
+
+- ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+- &pcie_devctl2);
++ ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
++ &pcie_devctl2);
+ out:
+ /* disable completion timeout resend */
+ gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
+
+- wr32(E1000_GCR, gcr);
++ E1000_WRITE_REG(hw, E1000_GCR, gcr);
+ return ret_val;
+ }
+
+ /**
+- * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
++ * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ * @pf: Physical Function pool - do not set anti-spoofing for the PF
+ *
+ * enables/disables L2 switch anti-spoofing functionality.
+ **/
+-void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
++void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
+ {
+ u32 reg_val, reg_offset;
+
+@@ -2044,7 +2281,7 @@
+ return;
+ }
+
+- reg_val = rd32(reg_offset);
++ reg_val = E1000_READ_REG(hw, reg_offset);
+ if (enable) {
+ reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+@@ -2056,66 +2293,67 @@
+ reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ }
+- wr32(reg_offset, reg_val);
++ E1000_WRITE_REG(hw, reg_offset, reg_val);
+ }
+
+ /**
+- * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
++ * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables L2 switch loopback functionality.
+ **/
+-void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
++void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
+ {
+ u32 dtxswc;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+- dtxswc = rd32(E1000_DTXSWC);
++ dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
+ if (enable)
+ dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ else
+ dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+- wr32(E1000_DTXSWC, dtxswc);
++ E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
+ break;
+- case e1000_i354:
+ case e1000_i350:
+- dtxswc = rd32(E1000_TXSWC);
++ case e1000_i354:
++ dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
+ if (enable)
+ dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ else
+ dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+- wr32(E1000_TXSWC, dtxswc);
++ E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
+ break;
+ default:
+ /* Currently no other hardware supports loopback */
+ break;
+ }
+
++
+ }
+
+ /**
+- * igb_vmdq_set_replication_pf - enable or disable vmdq replication
++ * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables replication of packets across multiple pools.
+ **/
+-void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
++void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
+ {
+- u32 vt_ctl = rd32(E1000_VT_CTL);
++ u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
+
+ if (enable)
+ vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
+ else
+ vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
+
+- wr32(E1000_VT_CTL, vt_ctl);
++ E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
+ }
+
+ /**
+- * igb_read_phy_reg_82580 - Read 82580 MDI control register
++ * e1000_read_phy_reg_82580 - Read 82580 MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+@@ -2123,15 +2361,17 @@
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+-static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
++static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
+ {
+ s32 ret_val;
+
++ DEBUGFUNC("e1000_read_phy_reg_82580");
++
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+- ret_val = igb_read_phy_reg_mdic(hw, offset, data);
++ ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+@@ -2140,23 +2380,24 @@
+ }
+
+ /**
+- * igb_write_phy_reg_82580 - Write 82580 MDI control register
++ * e1000_write_phy_reg_82580 - Write 82580 MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+-static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
++static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+ {
+ s32 ret_val;
+
++ DEBUGFUNC("e1000_write_phy_reg_82580");
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+- ret_val = igb_write_phy_reg_mdic(hw, offset, data);
++ ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+@@ -2165,123 +2406,133 @@
+ }
+
+ /**
+- * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
++ * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ * @hw: pointer to the HW structure
+ *
+ * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * the values found in the EEPROM. This addresses an issue in which these
+ * bits are not restored from EEPROM after reset.
+ **/
+-static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
++static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
+ {
+- s32 ret_val = 0;
++ s32 ret_val = E1000_SUCCESS;
+ u32 mdicnfg;
+ u16 nvm_data = 0;
+
++ DEBUGFUNC("e1000_reset_mdicnfg_82580");
++
+ if (hw->mac.type != e1000_82580)
+ goto out;
+- if (!igb_sgmii_active_82575(hw))
++ if (!e1000_sgmii_active_82575(hw))
+ goto out;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+ NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+ &nvm_data);
+ if (ret_val) {
+- hw_dbg("NVM Read Error\n");
++ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+- mdicnfg = rd32(E1000_MDICNFG);
++ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
+ if (nvm_data & NVM_WORD24_EXT_MDIO)
+ mdicnfg |= E1000_MDICNFG_EXT_MDIO;
+ if (nvm_data & NVM_WORD24_COM_MDIO)
+ mdicnfg |= E1000_MDICNFG_COM_MDIO;
+- wr32(E1000_MDICNFG, mdicnfg);
++ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
+ out:
+ return ret_val;
+ }
+
+ /**
+- * igb_reset_hw_82580 - Reset hardware
++ * e1000_reset_hw_82580 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets function or entire device (all ports, etc.)
+ * to a known state.
+ **/
+-static s32 igb_reset_hw_82580(struct e1000_hw *hw)
++static s32 e1000_reset_hw_82580(struct e1000_hw *hw)
+ {
+- s32 ret_val = 0;
++ s32 ret_val = E1000_SUCCESS;
+ /* BH SW mailbox bit in SW_FW_SYNC */
+ u16 swmbsw_mask = E1000_SW_SYNCH_MB;
+ u32 ctrl;
+ bool global_device_reset = hw->dev_spec._82575.global_device_reset;
+
++ DEBUGFUNC("e1000_reset_hw_82580");
++
+ hw->dev_spec._82575.global_device_reset = false;
+
+- /* due to hw errata, global device reset doesn't always
+- * work on 82580
+- */
++ /* 82580 does not reliably do global_device_reset due to hw errata */
+ if (hw->mac.type == e1000_82580)
+ global_device_reset = false;
+
+ /* Get current control state. */
+- ctrl = rd32(E1000_CTRL);
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+- /* Prevent the PCI-E bus from sticking if there is no TLP connection
++ /*
++ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+- ret_val = igb_disable_pcie_master(hw);
++ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+- hw_dbg("PCI-E Master disable polling has failed.\n");
++ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+- hw_dbg("Masking off all interrupts\n");
+- wr32(E1000_IMC, 0xffffffff);
+- wr32(E1000_RCTL, 0);
+- wr32(E1000_TCTL, E1000_TCTL_PSP);
+- wrfl();
++ DEBUGOUT("Masking off all interrupts\n");
++ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
++ E1000_WRITE_REG(hw, E1000_RCTL, 0);
++ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
++ E1000_WRITE_FLUSH(hw);
+
+- usleep_range(10000, 11000);
++ msec_delay(10);
+
+ /* Determine whether or not a global dev reset is requested */
+- if (global_device_reset &&
+- hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
++ if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw,
++ swmbsw_mask))
+ global_device_reset = false;
+
+- if (global_device_reset &&
+- !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
++ if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) &
++ E1000_STAT_DEV_RST_SET))
+ ctrl |= E1000_CTRL_DEV_RST;
+ else
+ ctrl |= E1000_CTRL_RST;
+
+- wr32(E1000_CTRL, ctrl);
+- wrfl();
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+- /* Add delay to insure DEV_RST has time to complete */
+- if (global_device_reset)
+- usleep_range(5000, 6000);
++ switch (hw->device_id) {
++ case E1000_DEV_ID_DH89XXCC_SGMII:
++ break;
++ default:
++ E1000_WRITE_FLUSH(hw);
++ break;
++ }
++
++ /* Add delay to insure DEV_RST or RST has time to complete */
++ msec_delay(5);
+
+- ret_val = igb_get_auto_rd_done(hw);
++ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+- /* When auto config read does not complete, do not
++ /*
++ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+- hw_dbg("Auto Read Done did not complete\n");
++ DEBUGOUT("Auto Read Done did not complete\n");
+ }
+
+ /* clear global device reset status bit */
+- wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
++ E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
+
+ /* Clear any pending interrupt events. */
+- wr32(E1000_IMC, 0xffffffff);
+- rd32(E1000_ICR);
++ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
++ E1000_READ_REG(hw, E1000_ICR);
+
+- ret_val = igb_reset_mdicnfg_82580(hw);
++ ret_val = e1000_reset_mdicnfg_82580(hw);
+ if (ret_val)
+- hw_dbg("Could not reset MDICNFG based on EEPROM\n");
++ DEBUGOUT("Could not reset MDICNFG based on EEPROM\n");
+
+ /* Install any alternate MAC address into RAR0 */
+- ret_val = igb_check_alt_mac_addr(hw);
++ ret_val = igb_e1000_check_alt_mac_addr_generic(hw);
+
+ /* Release semaphore */
+ if (global_device_reset)
+@@ -2291,7 +2542,7 @@
+ }
+
+ /**
+- * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
++ * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size
+ * @data: data received by reading RXPBS register
+ *
+ * The 82580 uses a table based approach for packet buffer allocation sizes.
+@@ -2300,398 +2551,1222 @@
+ * 0x0 36 72 144 1 2 4 8 16
+ * 0x8 35 70 140 rsv rsv rsv rsv rsv
+ */
+-u16 igb_rxpbs_adjust_82580(u32 data)
++u16 e1000_rxpbs_adjust_82580(u32 data)
+ {
+ u16 ret_val = 0;
+
+- if (data < ARRAY_SIZE(e1000_82580_rxpbs_table))
++ if (data < E1000_82580_RXPBS_TABLE_SIZE)
+ ret_val = e1000_82580_rxpbs_table[data];
+
+- return ret_val;
++ return ret_val;
++}
++
++/**
++ * e1000_validate_nvm_checksum_with_offset - Validate EEPROM
++ * checksum
++ * @hw: pointer to the HW structure
++ * @offset: offset in words of the checksum protected region
++ *
++ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
++ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
++ **/
++s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
++{
++ s32 ret_val = E1000_SUCCESS;
++ u16 checksum = 0;
++ u16 i, nvm_data;
++
++ DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
++
++ for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
++ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ goto out;
++ }
++ checksum += nvm_data;
++ }
++
++ if (checksum != (u16) NVM_SUM) {
++ DEBUGOUT("NVM Checksum Invalid\n");
++ ret_val = -E1000_ERR_NVM;
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_update_nvm_checksum_with_offset - Update EEPROM
++ * checksum
++ * @hw: pointer to the HW structure
++ * @offset: offset in words of the checksum protected region
++ *
++ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
++ * up to the checksum. Then calculates the EEPROM checksum and writes the
++ * value to the EEPROM.
++ **/
++s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
++{
++ s32 ret_val;
++ u16 checksum = 0;
++ u16 i, nvm_data;
++
++ DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
++
++ for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
++ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error while updating checksum.\n");
++ goto out;
++ }
++ checksum += nvm_data;
++ }
++ checksum = (u16) NVM_SUM - checksum;
++ ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
++ &checksum);
++ if (ret_val)
++ DEBUGOUT("NVM Write Error while updating checksum.\n");
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum
++ * @hw: pointer to the HW structure
++ *
++ * Calculates the EEPROM section checksum by reading/adding each word of
++ * the EEPROM and then verifies that the sum of the EEPROM is
++ * equal to 0xBABA.
++ **/
++static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
++{
++ s32 ret_val;
++ u16 eeprom_regions_count = 1;
++ u16 j, nvm_data;
++ u16 nvm_offset;
++
++ DEBUGFUNC("e1000_validate_nvm_checksum_82580");
++
++ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ goto out;
++ }
++
++ if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
++ /* if chekcsums compatibility bit is set validate checksums
++ * for all 4 ports. */
++ eeprom_regions_count = 4;
++ }
++
++ for (j = 0; j < eeprom_regions_count; j++) {
++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
++ ret_val = e1000_validate_nvm_checksum_with_offset(hw,
++ nvm_offset);
++ if (ret_val != E1000_SUCCESS)
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_update_nvm_checksum_82580 - Update EEPROM checksum
++ * @hw: pointer to the HW structure
++ *
++ * Updates the EEPROM section checksums for all 4 ports by reading/adding
++ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
++ * checksum and writes the value to the EEPROM.
++ **/
++static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw)
++{
++ s32 ret_val;
++ u16 j, nvm_data;
++ u16 nvm_offset;
++
++ DEBUGFUNC("e1000_update_nvm_checksum_82580");
++
++ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n");
++ goto out;
++ }
++
++ if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) {
++ /* set compatibility bit to validate checksums appropriately */
++ nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
++ ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
++ &nvm_data);
++ if (ret_val) {
++ DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n");
++ goto out;
++ }
++ }
++
++ for (j = 0; j < 4; j++) {
++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
++ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
++ if (ret_val)
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
++ * @hw: pointer to the HW structure
++ *
++ * Calculates the EEPROM section checksum by reading/adding each word of
++ * the EEPROM and then verifies that the sum of the EEPROM is
++ * equal to 0xBABA.
++ **/
++static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++ u16 j;
++ u16 nvm_offset;
++
++ DEBUGFUNC("e1000_validate_nvm_checksum_i350");
++
++ for (j = 0; j < 4; j++) {
++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
++ ret_val = e1000_validate_nvm_checksum_with_offset(hw,
++ nvm_offset);
++ if (ret_val != E1000_SUCCESS)
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_update_nvm_checksum_i350 - Update EEPROM checksum
++ * @hw: pointer to the HW structure
++ *
++ * Updates the EEPROM section checksums for all 4 ports by reading/adding
++ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
++ * checksum and writes the value to the EEPROM.
++ **/
++static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++ u16 j;
++ u16 nvm_offset;
++
++ DEBUGFUNC("e1000_update_nvm_checksum_i350");
++
++ for (j = 0; j < 4; j++) {
++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
++ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
++ if (ret_val != E1000_SUCCESS)
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * __e1000_access_emi_reg - Read/write EMI register
++ * @hw: pointer to the HW structure
++ * @addr: EMI address to program
++ * @data: pointer to value to read/write from/to the EMI address
++ * @read: boolean flag to indicate read or write
++ **/
++static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address,
++ u16 *data, bool read)
++{
++ s32 ret_val;
++
++ DEBUGFUNC("__e1000_access_emi_reg");
++
++ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
++ if (ret_val)
++ return ret_val;
++
++ if (read)
++ ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
++ else
++ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
++
++ return ret_val;
++}
++
++/**
++ * e1000_read_emi_reg - Read Extended Management Interface register
++ * @hw: pointer to the HW structure
++ * @addr: EMI address to program
++ * @data: value to be read from the EMI address
++ **/
++s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
++{
++ DEBUGFUNC("e1000_read_emi_reg");
++
++ return __e1000_access_emi_reg(hw, addr, data, true);
++}
++
++/**
++ * e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY
++ * @hw: pointer to the HW structure
++ *
++ * Initialize Marvell 1512 to work correctly with Avoton.
++ **/
++s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_initialize_M88E1512_phy");
++
++ /* Check if this is correct PHY. */
++ if (phy->id != M88E1512_E_PHY_ID)
++ goto out;
++
++ /* Switch to PHY page 0xFF. */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
++ if (ret_val)
++ goto out;
++
++ /* Switch to PHY page 0xFB. */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D);
++ if (ret_val)
++ goto out;
++
++ /* Switch to PHY page 0x12. */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
++ if (ret_val)
++ goto out;
++
++ /* Change mode to SGMII-to-Copper */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
++ if (ret_val)
++ goto out;
++
++ /* Return the PHY to page 0. */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.commit(hw);
++ if (ret_val) {
++ DEBUGOUT("Error committing the PHY changes\n");
++ return ret_val;
++ }
++
++ msec_delay(1000);
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_initialize_M88E1543_phy - Initialize M88E1543 PHY
++ * @hw: pointer to the HW structure
++ *
++ * Initialize Marvell 1543 to work correctly with Avoton.
++ **/
++s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_initialize_M88E1543_phy");
++
++ /* Check if this is correct PHY. */
++ if (phy->id != M88E1543_E_PHY_ID)
++ goto out;
++
++ /* Switch to PHY page 0xFF. */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
++ if (ret_val)
++ goto out;
++
++ /* Switch to PHY page 0xFB. */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0xC00D);
++ if (ret_val)
++ goto out;
++
++ /* Switch to PHY page 0x12. */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
++ if (ret_val)
++ goto out;
++
++ /* Change mode to SGMII-to-Copper */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
++ if (ret_val)
++ goto out;
++
++ /* Switch to PHY page 1. */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1);
++ if (ret_val)
++ goto out;
++
++ /* Change mode to 1000BASE-X/SGMII and autoneg enable; reset */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140);
++ if (ret_val)
++ goto out;
++
++ /* Return the PHY to page 0. */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.commit(hw);
++ if (ret_val) {
++ DEBUGOUT("Error committing the PHY changes\n");
++ return ret_val;
++ }
++
++ msec_delay(1000);
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_set_eee_i350 - Enable/disable EEE support
++ * @hw: pointer to the HW structure
++ * @adv1g: boolean flag enabling 1G EEE advertisement
++ * @adv100m: boolean flag enabling 100M EEE advertisement
++ *
++ * Enable/disable EEE based on setting in dev_spec structure.
++ *
++ **/
++s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
++{
++ u32 ipcnfg, eeer;
++
++ DEBUGFUNC("e1000_set_eee_i350");
++
++ if ((hw->mac.type < e1000_i350) ||
++ (hw->phy.media_type != e1000_media_type_copper))
++ goto out;
++ ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
++ eeer = E1000_READ_REG(hw, E1000_EEER);
++
++ /* enable or disable per user setting */
++ if (!(hw->dev_spec._82575.eee_disable)) {
++ u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU);
++
++ if (adv100M)
++ ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
++ else
++ ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
++
++ if (adv1G)
++ ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
++ else
++ ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
++
++ eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
++ E1000_EEER_LPI_FC);
++
++ /* This bit should not be set in normal operation. */
++ if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
++ DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
++ } else {
++ ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
++ eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
++ E1000_EEER_LPI_FC);
++ }
++ E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
++ E1000_WRITE_REG(hw, E1000_EEER, eeer);
++ E1000_READ_REG(hw, E1000_IPCNFG);
++ E1000_READ_REG(hw, E1000_EEER);
++out:
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_set_eee_i354 - Enable/disable EEE support
++ * @hw: pointer to the HW structure
++ * @adv1g: boolean flag enabling 1G EEE advertisement
++ * @adv100m: boolean flag enabling 100M EEE advertisement
++ *
++ * Enable/disable EEE legacy mode based on setting in dev_spec structure.
++ *
++ **/
++s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val = E1000_SUCCESS;
++ u16 phy_data;
++
++ DEBUGFUNC("e1000_set_eee_i354");
++
++ if ((hw->phy.media_type != e1000_media_type_copper) ||
++ ((phy->id != M88E1543_E_PHY_ID) &&
++ (phy->id != M88E1512_E_PHY_ID)))
++ goto out;
++
++ if (!hw->dev_spec._82575.eee_disable) {
++ /* Switch to PHY page 18. */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
++ if (ret_val)
++ goto out;
++
++ ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
++ &phy_data);
++ if (ret_val)
++ goto out;
++
++ phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
++ phy_data);
++ if (ret_val)
++ goto out;
++
++ /* Return the PHY to page 0. */
++ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
++ if (ret_val)
++ goto out;
++
++ /* Turn on EEE advertisement. */
++ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
++ E1000_EEE_ADV_DEV_I354,
++ &phy_data);
++ if (ret_val)
++ goto out;
++
++ if (adv100M)
++ phy_data |= E1000_EEE_ADV_100_SUPPORTED;
++ else
++ phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
++
++ if (adv1G)
++ phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
++ else
++ phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
++
++ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
++ E1000_EEE_ADV_DEV_I354,
++ phy_data);
++ } else {
++ /* Turn off EEE advertisement. */
++ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
++ E1000_EEE_ADV_DEV_I354,
++ &phy_data);
++ if (ret_val)
++ goto out;
++
++ phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
++ E1000_EEE_ADV_1000_SUPPORTED);
++ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
++ E1000_EEE_ADV_DEV_I354,
++ phy_data);
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_get_eee_status_i354 - Get EEE status
++ * @hw: pointer to the HW structure
++ * @status: EEE status
++ *
++ * Get EEE status by guessing based on whether Tx or Rx LPI indications have
++ * been received.
++ **/
++s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val = E1000_SUCCESS;
++ u16 phy_data;
++
++ DEBUGFUNC("e1000_get_eee_status_i354");
++
++ /* Check if EEE is supported on this device. */
++ if ((hw->phy.media_type != e1000_media_type_copper) ||
++ ((phy->id != M88E1543_E_PHY_ID) &&
++ (phy->id != M88E1512_E_PHY_ID)))
++ goto out;
++
++ ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
++ E1000_PCS_STATUS_DEV_I354,
++ &phy_data);
++ if (ret_val)
++ goto out;
++
++ *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
++ E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
++
++out:
++ return ret_val;
++}
++
++/* Due to a hw errata, if the host tries to configure the VFTA register
++ * while performing queries from the BMC or DMA, then the VFTA in some
++ * cases won't be written.
++ */
++
++/**
++ * e1000_clear_vfta_i350 - Clear VLAN filter table
++ * @hw: pointer to the HW structure
++ *
++ * Clears the register array which contains the VLAN filter table by
++ * setting all the values to 0.
++ **/
++void e1000_clear_vfta_i350(struct e1000_hw *hw)
++{
++ u32 offset;
++ int i;
++
++ DEBUGFUNC("e1000_clear_vfta_350");
++
++ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
++ for (i = 0; i < 10; i++)
++ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
++
++ E1000_WRITE_FLUSH(hw);
++ }
++}
++
++/**
++ * e1000_write_vfta_i350 - Write value to VLAN filter table
++ * @hw: pointer to the HW structure
++ * @offset: register offset in VLAN filter table
++ * @value: register value written to VLAN filter table
++ *
++ * Writes value at the given offset in the register array which stores
++ * the VLAN filter table.
++ **/
++void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
++{
++ int i;
++
++ DEBUGFUNC("e1000_write_vfta_350");
++
++ for (i = 0; i < 10; i++)
++ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
++
++ E1000_WRITE_FLUSH(hw);
++}
++
++/**
++ * e1000_set_i2c_bb - Enable I2C bit-bang
++ * @hw: pointer to the HW structure
++ *
++ * Enable I2C bit-bang interface
++ *
++ **/
++s32 e1000_set_i2c_bb(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++ u32 ctrl_ext, i2cparams;
++
++ DEBUGFUNC("e1000_set_i2c_bb");
++
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ ctrl_ext |= E1000_CTRL_I2C_ENA;
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
++ E1000_WRITE_FLUSH(hw);
++
++ i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS);
++ i2cparams |= E1000_I2CBB_EN;
++ i2cparams |= E1000_I2C_DATA_OE_N;
++ i2cparams |= E1000_I2C_CLK_OE_N;
++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams);
++ E1000_WRITE_FLUSH(hw);
++
++ return ret_val;
++}
++
++/**
++ * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C
++ * @hw: pointer to hardware structure
++ * @byte_offset: byte offset to read
++ * @dev_addr: device address
++ * @data: value read
++ *
++ * Performs byte read operation over I2C interface at
++ * a specified device address.
++ **/
++s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
++ u8 dev_addr, u8 *data)
++{
++ s32 status = E1000_SUCCESS;
++ u32 max_retry = 10;
++ u32 retry = 1;
++ u16 swfw_mask = 0;
++
++ bool nack = true;
++
++ DEBUGFUNC("e1000_read_i2c_byte_generic");
++
++ swfw_mask = E1000_SWFW_PHY0_SM;
++
++ do {
++ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
++ != E1000_SUCCESS) {
++ status = E1000_ERR_SWFW_SYNC;
++ goto read_byte_out;
++ }
++
++ e1000_i2c_start(hw);
++
++ /* Device Address and write indication */
++ status = e1000_clock_out_i2c_byte(hw, dev_addr);
++ if (status != E1000_SUCCESS)
++ goto fail;
++
++ status = e1000_get_i2c_ack(hw);
++ if (status != E1000_SUCCESS)
++ goto fail;
++
++ status = e1000_clock_out_i2c_byte(hw, byte_offset);
++ if (status != E1000_SUCCESS)
++ goto fail;
++
++ status = e1000_get_i2c_ack(hw);
++ if (status != E1000_SUCCESS)
++ goto fail;
++
++ e1000_i2c_start(hw);
++
++ /* Device Address and read indication */
++ status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1));
++ if (status != E1000_SUCCESS)
++ goto fail;
++
++ status = e1000_get_i2c_ack(hw);
++ if (status != E1000_SUCCESS)
++ goto fail;
++
++ status = e1000_clock_in_i2c_byte(hw, data);
++ if (status != E1000_SUCCESS)
++ goto fail;
++
++ status = e1000_clock_out_i2c_bit(hw, nack);
++ if (status != E1000_SUCCESS)
++ goto fail;
++
++ e1000_i2c_stop(hw);
++ break;
++
++fail:
++ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
++ msec_delay(100);
++ e1000_i2c_bus_clear(hw);
++ retry++;
++ if (retry < max_retry)
++ DEBUGOUT("I2C byte read error - Retrying.\n");
++ else
++ DEBUGOUT("I2C byte read error.\n");
++
++ } while (retry < max_retry);
++
++ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
++
++read_byte_out:
++
++ return status;
++}
++
++/**
++ * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C
++ * @hw: pointer to hardware structure
++ * @byte_offset: byte offset to write
++ * @dev_addr: device address
++ * @data: value to write
++ *
++ * Performs byte write operation over I2C interface at
++ * a specified device address.
++ **/
++s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
++ u8 dev_addr, u8 data)
++{
++ s32 status = E1000_SUCCESS;
++ u32 max_retry = 1;
++ u32 retry = 0;
++ u16 swfw_mask = 0;
++
++ DEBUGFUNC("e1000_write_i2c_byte_generic");
++
++ swfw_mask = E1000_SWFW_PHY0_SM;
++
++ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) {
++ status = E1000_ERR_SWFW_SYNC;
++ goto write_byte_out;
++ }
++
++ do {
++ e1000_i2c_start(hw);
++
++ status = e1000_clock_out_i2c_byte(hw, dev_addr);
++ if (status != E1000_SUCCESS)
++ goto fail;
++
++ status = e1000_get_i2c_ack(hw);
++ if (status != E1000_SUCCESS)
++ goto fail;
++
++ status = e1000_clock_out_i2c_byte(hw, byte_offset);
++ if (status != E1000_SUCCESS)
++ goto fail;
++
++ status = e1000_get_i2c_ack(hw);
++ if (status != E1000_SUCCESS)
++ goto fail;
++
++ status = e1000_clock_out_i2c_byte(hw, data);
++ if (status != E1000_SUCCESS)
++ goto fail;
++
++ status = e1000_get_i2c_ack(hw);
++ if (status != E1000_SUCCESS)
++ goto fail;
++
++ e1000_i2c_stop(hw);
++ break;
++
++fail:
++ e1000_i2c_bus_clear(hw);
++ retry++;
++ if (retry < max_retry)
++ DEBUGOUT("I2C byte write error - Retrying.\n");
++ else
++ DEBUGOUT("I2C byte write error.\n");
++ } while (retry < max_retry);
++
++ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
++
++write_byte_out:
++
++ return status;
++}
++
++/**
++ * e1000_i2c_start - Sets I2C start condition
++ * @hw: pointer to hardware structure
++ *
++ * Sets I2C start condition (High -> Low on SDA while SCL is High)
++ **/
++static void e1000_i2c_start(struct e1000_hw *hw)
++{
++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
++
++ DEBUGFUNC("e1000_i2c_start");
++
++ /* Start condition must begin with data and clock high */
++ e1000_set_i2c_data(hw, &i2cctl, 1);
++ e1000_raise_i2c_clk(hw, &i2cctl);
++
++ /* Setup time for start condition (4.7us) */
++ usec_delay(E1000_I2C_T_SU_STA);
++
++ e1000_set_i2c_data(hw, &i2cctl, 0);
++
++ /* Hold time for start condition (4us) */
++ usec_delay(E1000_I2C_T_HD_STA);
++
++ e1000_lower_i2c_clk(hw, &i2cctl);
++
++ /* Minimum low period of clock is 4.7 us */
++ usec_delay(E1000_I2C_T_LOW);
++
+ }
+
+ /**
+- * igb_validate_nvm_checksum_with_offset - Validate EEPROM
+- * checksum
+- * @hw: pointer to the HW structure
+- * @offset: offset in words of the checksum protected region
++ * e1000_i2c_stop - Sets I2C stop condition
++ * @hw: pointer to hardware structure
+ *
+- * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+- * and then verifies that the sum of the EEPROM is equal to 0xBABA.
++ * Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ **/
+-static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+- u16 offset)
++static void e1000_i2c_stop(struct e1000_hw *hw)
+ {
+- s32 ret_val = 0;
+- u16 checksum = 0;
+- u16 i, nvm_data;
++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+- for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
+- ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+- if (ret_val) {
+- hw_dbg("NVM Read Error\n");
+- goto out;
+- }
+- checksum += nvm_data;
+- }
++ DEBUGFUNC("e1000_i2c_stop");
+
+- if (checksum != (u16) NVM_SUM) {
+- hw_dbg("NVM Checksum Invalid\n");
+- ret_val = -E1000_ERR_NVM;
+- goto out;
+- }
++ /* Stop condition must begin with data low and clock high */
++ e1000_set_i2c_data(hw, &i2cctl, 0);
++ e1000_raise_i2c_clk(hw, &i2cctl);
+
+-out:
+- return ret_val;
++ /* Setup time for stop condition (4us) */
++ usec_delay(E1000_I2C_T_SU_STO);
++
++ e1000_set_i2c_data(hw, &i2cctl, 1);
++
++ /* bus free time between stop and start (4.7us)*/
++ usec_delay(E1000_I2C_T_BUF);
+ }
+
+ /**
+- * igb_update_nvm_checksum_with_offset - Update EEPROM
+- * checksum
+- * @hw: pointer to the HW structure
+- * @offset: offset in words of the checksum protected region
++ * e1000_clock_in_i2c_byte - Clocks in one byte via I2C
++ * @hw: pointer to hardware structure
++ * @data: data byte to clock in
+ *
+- * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+- * up to the checksum. Then calculates the EEPROM checksum and writes the
+- * value to the EEPROM.
++ * Clocks in one byte data via I2C data/clock
+ **/
+-static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
++static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data)
+ {
+- s32 ret_val;
+- u16 checksum = 0;
+- u16 i, nvm_data;
++ s32 i;
++ bool bit = 0;
+
+- for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
+- ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+- if (ret_val) {
+- hw_dbg("NVM Read Error while updating checksum.\n");
+- goto out;
+- }
+- checksum += nvm_data;
++ DEBUGFUNC("e1000_clock_in_i2c_byte");
++
++ *data = 0;
++ for (i = 7; i >= 0; i--) {
++ e1000_clock_in_i2c_bit(hw, &bit);
++ *data |= bit << i;
+ }
+- checksum = (u16) NVM_SUM - checksum;
+- ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
+- &checksum);
+- if (ret_val)
+- hw_dbg("NVM Write Error while updating checksum.\n");
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
+- * @hw: pointer to the HW structure
++ * e1000_clock_out_i2c_byte - Clocks out one byte via I2C
++ * @hw: pointer to hardware structure
++ * @data: data byte clocked out
+ *
+- * Calculates the EEPROM section checksum by reading/adding each word of
+- * the EEPROM and then verifies that the sum of the EEPROM is
+- * equal to 0xBABA.
++ * Clocks out one byte data via I2C data/clock
+ **/
+-static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
++static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data)
+ {
+- s32 ret_val = 0;
+- u16 eeprom_regions_count = 1;
+- u16 j, nvm_data;
+- u16 nvm_offset;
++ s32 status = E1000_SUCCESS;
++ s32 i;
++ u32 i2cctl;
++ bool bit = 0;
+
+- ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+- if (ret_val) {
+- hw_dbg("NVM Read Error\n");
+- goto out;
+- }
++ DEBUGFUNC("e1000_clock_out_i2c_byte");
+
+- if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
+- /* if checksums compatibility bit is set validate checksums
+- * for all 4 ports.
+- */
+- eeprom_regions_count = 4;
+- }
++ for (i = 7; i >= 0; i--) {
++ bit = (data >> i) & 0x1;
++ status = e1000_clock_out_i2c_bit(hw, bit);
+
+- for (j = 0; j < eeprom_regions_count; j++) {
+- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+- ret_val = igb_validate_nvm_checksum_with_offset(hw,
+- nvm_offset);
+- if (ret_val != 0)
+- goto out;
++ if (status != E1000_SUCCESS)
++ break;
+ }
+
+-out:
+- return ret_val;
++ /* Release SDA line (set high) */
++ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
++
++ i2cctl |= E1000_I2C_DATA_OE_N;
++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
++ E1000_WRITE_FLUSH(hw);
++
++ return status;
+ }
+
+ /**
+- * igb_update_nvm_checksum_82580 - Update EEPROM checksum
+- * @hw: pointer to the HW structure
++ * e1000_get_i2c_ack - Polls for I2C ACK
++ * @hw: pointer to hardware structure
+ *
+- * Updates the EEPROM section checksums for all 4 ports by reading/adding
+- * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+- * checksum and writes the value to the EEPROM.
++ * Clocks in/out one bit via I2C data/clock
+ **/
+-static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
++static s32 e1000_get_i2c_ack(struct e1000_hw *hw)
+ {
+- s32 ret_val;
+- u16 j, nvm_data;
+- u16 nvm_offset;
++ s32 status = E1000_SUCCESS;
++ u32 i = 0;
++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
++ u32 timeout = 10;
++ bool ack = true;
+
+- ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+- if (ret_val) {
+- hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
+- goto out;
+- }
++ DEBUGFUNC("e1000_get_i2c_ack");
+
+- if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
+- /* set compatibility bit to validate checksums appropriately */
+- nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
+- ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
+- &nvm_data);
+- if (ret_val) {
+- hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
+- goto out;
+- }
++ e1000_raise_i2c_clk(hw, &i2cctl);
++
++ /* Minimum high period of clock is 4us */
++ usec_delay(E1000_I2C_T_HIGH);
++
++ /* Wait until SCL returns high */
++ for (i = 0; i < timeout; i++) {
++ usec_delay(1);
++ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
++ if (i2cctl & E1000_I2C_CLK_IN)
++ break;
+ }
++ if (!(i2cctl & E1000_I2C_CLK_IN))
++ return E1000_ERR_I2C;
+
+- for (j = 0; j < 4; j++) {
+- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+- ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
+- if (ret_val)
+- goto out;
++ ack = e1000_get_i2c_data(&i2cctl);
++ if (ack) {
++ DEBUGOUT("I2C ack was not received.\n");
++ status = E1000_ERR_I2C;
+ }
+
+-out:
+- return ret_val;
++ e1000_lower_i2c_clk(hw, &i2cctl);
++
++ /* Minimum low period of clock is 4.7 us */
++ usec_delay(E1000_I2C_T_LOW);
++
++ return status;
+ }
+
+ /**
+- * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
+- * @hw: pointer to the HW structure
++ * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
++ * @hw: pointer to hardware structure
++ * @data: read data value
+ *
+- * Calculates the EEPROM section checksum by reading/adding each word of
+- * the EEPROM and then verifies that the sum of the EEPROM is
+- * equal to 0xBABA.
++ * Clocks in one bit via I2C data/clock
+ **/
+-static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
++static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data)
+ {
+- s32 ret_val = 0;
+- u16 j;
+- u16 nvm_offset;
++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+- for (j = 0; j < 4; j++) {
+- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+- ret_val = igb_validate_nvm_checksum_with_offset(hw,
+- nvm_offset);
+- if (ret_val != 0)
+- goto out;
+- }
++ DEBUGFUNC("e1000_clock_in_i2c_bit");
+
+-out:
+- return ret_val;
++ e1000_raise_i2c_clk(hw, &i2cctl);
++
++ /* Minimum high period of clock is 4us */
++ usec_delay(E1000_I2C_T_HIGH);
++
++ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
++ *data = e1000_get_i2c_data(&i2cctl);
++
++ e1000_lower_i2c_clk(hw, &i2cctl);
++
++ /* Minimum low period of clock is 4.7 us */
++ usec_delay(E1000_I2C_T_LOW);
++
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_update_nvm_checksum_i350 - Update EEPROM checksum
+- * @hw: pointer to the HW structure
++ * e1000_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock
++ * @hw: pointer to hardware structure
++ * @data: data value to write
+ *
+- * Updates the EEPROM section checksums for all 4 ports by reading/adding
+- * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+- * checksum and writes the value to the EEPROM.
++ * Clocks out one bit via I2C data/clock
+ **/
+-static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
++static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data)
+ {
+- s32 ret_val = 0;
+- u16 j;
+- u16 nvm_offset;
++ s32 status;
++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+- for (j = 0; j < 4; j++) {
+- nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+- ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
+- if (ret_val != 0)
+- goto out;
++ DEBUGFUNC("e1000_clock_out_i2c_bit");
++
++ status = e1000_set_i2c_data(hw, &i2cctl, data);
++ if (status == E1000_SUCCESS) {
++ e1000_raise_i2c_clk(hw, &i2cctl);
++
++ /* Minimum high period of clock is 4us */
++ usec_delay(E1000_I2C_T_HIGH);
++
++ e1000_lower_i2c_clk(hw, &i2cctl);
++
++ /* Minimum low period of clock is 4.7 us.
++ * This also takes care of the data hold time.
++ */
++ usec_delay(E1000_I2C_T_LOW);
++ } else {
++ status = E1000_ERR_I2C;
++ DEBUGOUT1("I2C data was not set to %X\n", data);
+ }
+
+-out:
+- return ret_val;
++ return status;
+ }
+-
+ /**
+- * __igb_access_emi_reg - Read/write EMI register
+- * @hw: pointer to the HW structure
+- * @addr: EMI address to program
+- * @data: pointer to value to read/write from/to the EMI address
+- * @read: boolean flag to indicate read or write
++ * e1000_raise_i2c_clk - Raises the I2C SCL clock
++ * @hw: pointer to hardware structure
++ * @i2cctl: Current value of I2CCTL register
++ *
++ * Raises the I2C clock line '0'->'1'
+ **/
+-static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
+- u16 *data, bool read)
++static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
+ {
+- s32 ret_val = 0;
+-
+- ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
+- if (ret_val)
+- return ret_val;
++ DEBUGFUNC("e1000_raise_i2c_clk");
+
+- if (read)
+- ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
+- else
+- ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
++ *i2cctl |= E1000_I2C_CLK_OUT;
++ *i2cctl &= ~E1000_I2C_CLK_OE_N;
++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
++ E1000_WRITE_FLUSH(hw);
+
+- return ret_val;
++ /* SCL rise time (1000ns) */
++ usec_delay(E1000_I2C_T_RISE);
+ }
+
+ /**
+- * igb_read_emi_reg - Read Extended Management Interface register
+- * @hw: pointer to the HW structure
+- * @addr: EMI address to program
+- * @data: value to be read from the EMI address
++ * e1000_lower_i2c_clk - Lowers the I2C SCL clock
++ * @hw: pointer to hardware structure
++ * @i2cctl: Current value of I2CCTL register
++ *
++ * Lowers the I2C clock line '1'->'0'
+ **/
+-s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
++static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
+ {
+- return __igb_access_emi_reg(hw, addr, data, true);
++
++ DEBUGFUNC("e1000_lower_i2c_clk");
++
++ *i2cctl &= ~E1000_I2C_CLK_OUT;
++ *i2cctl &= ~E1000_I2C_CLK_OE_N;
++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
++ E1000_WRITE_FLUSH(hw);
++
++ /* SCL fall time (300ns) */
++ usec_delay(E1000_I2C_T_FALL);
+ }
+
+ /**
+- * igb_set_eee_i350 - Enable/disable EEE support
+- * @hw: pointer to the HW structure
+- *
+- * Enable/disable EEE based on setting in dev_spec structure.
++ * e1000_set_i2c_data - Sets the I2C data bit
++ * @hw: pointer to hardware structure
++ * @i2cctl: Current value of I2CCTL register
++ * @data: I2C data value (0 or 1) to set
+ *
++ * Sets the I2C data bit
+ **/
+-s32 igb_set_eee_i350(struct e1000_hw *hw)
++static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data)
+ {
+- u32 ipcnfg, eeer;
++ s32 status = E1000_SUCCESS;
+
+- if ((hw->mac.type < e1000_i350) ||
+- (hw->phy.media_type != e1000_media_type_copper))
+- goto out;
+- ipcnfg = rd32(E1000_IPCNFG);
+- eeer = rd32(E1000_EEER);
++ DEBUGFUNC("e1000_set_i2c_data");
+
+- /* enable or disable per user setting */
+- if (!(hw->dev_spec._82575.eee_disable)) {
+- u32 eee_su = rd32(E1000_EEE_SU);
++ if (data)
++ *i2cctl |= E1000_I2C_DATA_OUT;
++ else
++ *i2cctl &= ~E1000_I2C_DATA_OUT;
+
+- ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+- eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
+- E1000_EEER_LPI_FC);
++ *i2cctl &= ~E1000_I2C_DATA_OE_N;
++ *i2cctl |= E1000_I2C_CLK_OE_N;
++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
++ E1000_WRITE_FLUSH(hw);
+
+- /* This bit should not be set in normal operation. */
+- if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
+- hw_dbg("LPI Clock Stop Bit should not be set!\n");
++ /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
++ usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA);
+
+- } else {
+- ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
+- E1000_IPCNFG_EEE_100M_AN);
+- eeer &= ~(E1000_EEER_TX_LPI_EN |
+- E1000_EEER_RX_LPI_EN |
+- E1000_EEER_LPI_FC);
++ *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
++ if (data != e1000_get_i2c_data(i2cctl)) {
++ status = E1000_ERR_I2C;
++ DEBUGOUT1("Error - I2C data was not set to %X.\n", data);
+ }
+- wr32(E1000_IPCNFG, ipcnfg);
+- wr32(E1000_EEER, eeer);
+- rd32(E1000_IPCNFG);
+- rd32(E1000_EEER);
+-out:
+
+- return 0;
++ return status;
+ }
+
+ /**
+- * igb_set_eee_i354 - Enable/disable EEE support
+- * @hw: pointer to the HW structure
+- *
+- * Enable/disable EEE legacy mode based on setting in dev_spec structure.
++ * e1000_get_i2c_data - Reads the I2C SDA data bit
++ * @hw: pointer to hardware structure
++ * @i2cctl: Current value of I2CCTL register
+ *
++ * Returns the I2C data bit value
+ **/
+-s32 igb_set_eee_i354(struct e1000_hw *hw)
++static bool e1000_get_i2c_data(u32 *i2cctl)
+ {
+- struct e1000_phy_info *phy = &hw->phy;
+- s32 ret_val = 0;
+- u16 phy_data;
+-
+- if ((hw->phy.media_type != e1000_media_type_copper) ||
+- (phy->id != M88E1543_E_PHY_ID))
+- goto out;
+-
+- if (!hw->dev_spec._82575.eee_disable) {
+- /* Switch to PHY page 18. */
+- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
+- if (ret_val)
+- goto out;
+-
+- ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+- &phy_data);
+- if (ret_val)
+- goto out;
+-
+- phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
+- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+- phy_data);
+- if (ret_val)
+- goto out;
+-
+- /* Return the PHY to page 0. */
+- ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+- if (ret_val)
+- goto out;
+-
+- /* Turn on EEE advertisement. */
+- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+- E1000_EEE_ADV_DEV_I354,
+- &phy_data);
+- if (ret_val)
+- goto out;
++ bool data;
+
+- phy_data |= E1000_EEE_ADV_100_SUPPORTED |
+- E1000_EEE_ADV_1000_SUPPORTED;
+- ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+- E1000_EEE_ADV_DEV_I354,
+- phy_data);
+- } else {
+- /* Turn off EEE advertisement. */
+- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+- E1000_EEE_ADV_DEV_I354,
+- &phy_data);
+- if (ret_val)
+- goto out;
++ DEBUGFUNC("e1000_get_i2c_data");
+
+- phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
+- E1000_EEE_ADV_1000_SUPPORTED);
+- ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+- E1000_EEE_ADV_DEV_I354,
+- phy_data);
+- }
++ if (*i2cctl & E1000_I2C_DATA_IN)
++ data = 1;
++ else
++ data = 0;
+
+-out:
+- return ret_val;
++ return data;
+ }
+
+ /**
+- * igb_get_eee_status_i354 - Get EEE status
+- * @hw: pointer to the HW structure
+- * @status: EEE status
++ * e1000_i2c_bus_clear - Clears the I2C bus
++ * @hw: pointer to hardware structure
+ *
+- * Get EEE status by guessing based on whether Tx or Rx LPI indications have
+- * been received.
++ * Clears the I2C bus by sending nine clock pulses.
++ * Used when data line is stuck low.
+ **/
+-s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
++void e1000_i2c_bus_clear(struct e1000_hw *hw)
+ {
+- struct e1000_phy_info *phy = &hw->phy;
+- s32 ret_val = 0;
+- u16 phy_data;
++ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
++ u32 i;
+
+- /* Check if EEE is supported on this device. */
+- if ((hw->phy.media_type != e1000_media_type_copper) ||
+- (phy->id != M88E1543_E_PHY_ID))
+- goto out;
++ DEBUGFUNC("e1000_i2c_bus_clear");
+
+- ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
+- E1000_PCS_STATUS_DEV_I354,
+- &phy_data);
+- if (ret_val)
+- goto out;
++ e1000_i2c_start(hw);
+
+- *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
+- E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
++ e1000_set_i2c_data(hw, &i2cctl, 1);
+
+-out:
+- return ret_val;
++ for (i = 0; i < 9; i++) {
++ e1000_raise_i2c_clk(hw, &i2cctl);
++
++ /* Min high period of clock is 4us */
++ usec_delay(E1000_I2C_T_HIGH);
++
++ e1000_lower_i2c_clk(hw, &i2cctl);
++
++ /* Min low period of clock is 4.7us*/
++ usec_delay(E1000_I2C_T_LOW);
++ }
++
++ e1000_i2c_start(hw);
++
++ /* Put the i2c bus back to default state */
++ e1000_i2c_stop(hw);
+ }
+
+ static const u8 e1000_emc_temp_data[4] = {
+@@ -2707,14 +3782,13 @@
+ E1000_EMC_DIODE3_THERM_LIMIT
+ };
+
+-#ifdef CONFIG_IGB_HWMON
+ /**
+- * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
++ * e1000_get_thermal_sensor_data_generic - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Updates the temperatures in mac.thermal_sensor_data
+ **/
+-static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
++s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+ {
+ u16 ets_offset;
+ u16 ets_cfg;
+@@ -2725,17 +3799,19 @@
+ u8 i;
+ struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
++ DEBUGFUNC("e1000_get_thermal_sensor_data_generic");
++
+ if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+ return E1000_NOT_IMPLEMENTED;
+
+- data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);
++ data->sensor[0].temp = (E1000_READ_REG(hw, E1000_THMJT) & 0xFF);
+
+ /* Return the internal sensor only if ETS is unsupported */
+- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
++ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset);
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+- return 0;
++ return E1000_SUCCESS;
+
+- hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
++ e1000_read_nvm(hw, ets_offset, 1, &ets_cfg);
+ if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ != NVM_ETS_TYPE_EMC)
+ return E1000_NOT_IMPLEMENTED;
+@@ -2745,7 +3821,7 @@
+ num_sensors = E1000_MAX_SENSORS;
+
+ for (i = 1; i < num_sensors; i++) {
+- hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
++ e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor);
+ sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+ NVM_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+@@ -2757,17 +3833,17 @@
+ E1000_I2C_THERMAL_SENSOR_ADDR,
+ &data->sensor[i].temp);
+ }
+- return 0;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
++ * e1000_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Sets the thermal sensor thresholds according to the NVM map
+ * and save off the threshold and location values into mac.thermal_sensor_data
+ **/
+-static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
++s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+ {
+ u16 ets_offset;
+ u16 ets_cfg;
+@@ -2780,6 +3856,8 @@
+ u8 i;
+ struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
++ DEBUGFUNC("e1000_init_thermal_sensor_thresh_generic");
++
+ if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+ return E1000_NOT_IMPLEMENTED;
+
+@@ -2787,16 +3865,16 @@
+
+ data->sensor[0].location = 0x1;
+ data->sensor[0].caution_thresh =
+- (rd32(E1000_THHIGHTC) & 0xFF);
++ (E1000_READ_REG(hw, E1000_THHIGHTC) & 0xFF);
+ data->sensor[0].max_op_thresh =
+- (rd32(E1000_THLOWTC) & 0xFF);
++ (E1000_READ_REG(hw, E1000_THLOWTC) & 0xFF);
+
+ /* Return the internal sensor only if ETS is unsupported */
+- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
++ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset);
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+- return 0;
++ return E1000_SUCCESS;
+
+- hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
++ e1000_read_nvm(hw, ets_offset, 1, &ets_cfg);
+ if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ != NVM_ETS_TYPE_EMC)
+ return E1000_NOT_IMPLEMENTED;
+@@ -2806,7 +3884,7 @@
+ num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+
+ for (i = 1; i <= num_sensors; i++) {
+- hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
++ e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor);
+ sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+ NVM_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+@@ -2825,41 +3903,5 @@
+ low_thresh_delta;
+ }
+ }
+- return 0;
++ return E1000_SUCCESS;
+ }
+-
+-#endif
+-static struct e1000_mac_operations e1000_mac_ops_82575 = {
+- .init_hw = igb_init_hw_82575,
+- .check_for_link = igb_check_for_link_82575,
+- .rar_set = igb_rar_set,
+- .read_mac_addr = igb_read_mac_addr_82575,
+- .get_speed_and_duplex = igb_get_link_up_info_82575,
+-#ifdef CONFIG_IGB_HWMON
+- .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
+- .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
+-#endif
+-};
+-
+-static struct e1000_phy_operations e1000_phy_ops_82575 = {
+- .acquire = igb_acquire_phy_82575,
+- .get_cfg_done = igb_get_cfg_done_82575,
+- .release = igb_release_phy_82575,
+- .write_i2c_byte = igb_write_i2c_byte,
+- .read_i2c_byte = igb_read_i2c_byte,
+-};
+-
+-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
+- .acquire = igb_acquire_nvm_82575,
+- .read = igb_read_nvm_eerd,
+- .release = igb_release_nvm_82575,
+- .write = igb_write_nvm_spi,
+-};
+-
+-const struct e1000_info e1000_82575_info = {
+- .get_invariants = igb_get_invariants_82575,
+- .mac_ops = &e1000_mac_ops_82575,
+- .phy_ops = &e1000_phy_ops_82575,
+- .nvm_ops = &e1000_nvm_ops_82575,
+-};
+-
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
+--- a/drivers/net/ethernet/intel/igb/e1000_82575.h 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_82575.h 2016-11-14 14:32:08.579567168 +0000
+@@ -1,67 +1,149 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
+
+ #ifndef _E1000_82575_H_
+ #define _E1000_82575_H_
+
+-void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+-void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+-void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
+-void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+-s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+- u8 *data);
+-s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+- u8 data);
+-
+-#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
+- (ID_LED_DEF1_DEF2 << 8) | \
+- (ID_LED_DEF1_DEF2 << 4) | \
+- (ID_LED_OFF1_ON2))
+-
+-#define E1000_RAR_ENTRIES_82575 16
+-#define E1000_RAR_ENTRIES_82576 24
+-#define E1000_RAR_ENTRIES_82580 24
+-#define E1000_RAR_ENTRIES_I350 32
+-
+-#define E1000_SW_SYNCH_MB 0x00000100
+-#define E1000_STAT_DEV_RST_SET 0x00100000
+-#define E1000_CTRL_DEV_RST 0x20000000
+-
+-/* SRRCTL bit definitions */
+-#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+-#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+-#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+-#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+-#define E1000_SRRCTL_DROP_EN 0x80000000
+-#define E1000_SRRCTL_TIMESTAMP 0x40000000
++#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
++ (ID_LED_DEF1_DEF2 << 8) | \
++ (ID_LED_DEF1_DEF2 << 4) | \
++ (ID_LED_OFF1_ON2))
++/*
++ * Receive Address Register Count
++ * Number of high/low register pairs in the RAR. The RAR (Receive Address
++ * Registers) holds the directed and multicast addresses that we monitor.
++ * These entries are also used for MAC-based filtering.
++ */
++/*
++ * For 82576, there are an additional set of RARs that begin at an offset
++ * separate from the first set of RARs.
++ */
++#define E1000_RAR_ENTRIES_82575 16
++#define E1000_RAR_ENTRIES_82576 24
++#define E1000_RAR_ENTRIES_82580 24
++#define E1000_RAR_ENTRIES_I350 32
++#define E1000_SW_SYNCH_MB 0x00000100
++#define E1000_STAT_DEV_RST_SET 0x00100000
++#define E1000_CTRL_DEV_RST 0x20000000
++
++struct e1000_adv_data_desc {
++ __le64 buffer_addr; /* Address of the descriptor's data buffer */
++ union {
++ u32 data;
++ struct {
++ u32 datalen:16; /* Data buffer length */
++ u32 rsvd:4;
++ u32 dtyp:4; /* Descriptor type */
++ u32 dcmd:8; /* Descriptor command */
++ } config;
++ } lower;
++ union {
++ u32 data;
++ struct {
++ u32 status:4; /* Descriptor status */
++ u32 idx:4;
++ u32 popts:6; /* Packet Options */
++ u32 paylen:18; /* Payload length */
++ } options;
++ } upper;
++};
+
++#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */
++#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */
++#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */
++#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */
++#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */
++#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */
++#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */
++#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */
++#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */
++#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */
++#define E1000_ADV_DCMD_RS 0x8 /* Report Status */
++#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */
++#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */
++/* Extended Device Control */
++#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */
++
++struct e1000_adv_context_desc {
++ union {
++ u32 ip_config;
++ struct {
++ u32 iplen:9;
++ u32 maclen:7;
++ u32 vlan_tag:16;
++ } fields;
++ } ip_setup;
++ u32 seq_num;
++ union {
++ u64 l4_config;
++ struct {
++ u32 mkrloc:9;
++ u32 tucmd:11;
++ u32 dtyp:4;
++ u32 adv:8;
++ u32 rsvd:4;
++ u32 idx:4;
++ u32 l4len:8;
++ u32 mss:16;
++ } fields;
++ } l4_setup;
++};
+
+-#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
+-#define E1000_MRQC_ENABLE_VMDQ 0x00000003
+-#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+-#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
+-#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+-#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
++/* SRRCTL bit definitions */
++#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
++#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
++#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
++#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000
++#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
++#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
++#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
++#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
++#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
++#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
++#define E1000_SRRCTL_TIMESTAMP 0x40000000
++#define E1000_SRRCTL_DROP_EN 0x80000000
++
++#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
++#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
++
++#define E1000_TX_HEAD_WB_ENABLE 0x1
++#define E1000_TX_SEQNUM_WB_ENABLE 0x2
++
++#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
++#define E1000_MRQC_ENABLE_VMDQ 0x00000003
++#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
++#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
++#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
++#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
++#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002
++
++#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8
++#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \
++ E1000_VMRCTL_MIRROR_PORT_SHIFT)
++#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0)
++#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1)
++#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2)
+
+ #define E1000_EICR_TX_QUEUE ( \
+ E1000_EICR_TX_QUEUE0 | \
+@@ -75,42 +157,114 @@
+ E1000_EICR_RX_QUEUE2 | \
+ E1000_EICR_RX_QUEUE3)
+
++#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
++#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
++
++#define EIMS_ENABLE_MASK ( \
++ E1000_EIMS_RX_QUEUE | \
++ E1000_EIMS_TX_QUEUE | \
++ E1000_EIMS_TCP_TIMER | \
++ E1000_EIMS_OTHER)
++
+ /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+-#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+-#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
++#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
++#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
++#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
++#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
++#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
++#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
++#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
++#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
++#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
++#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
+
+ /* Receive Descriptor - Advanced */
+ union e1000_adv_rx_desc {
+ struct {
+- __le64 pkt_addr; /* Packet buffer address */
+- __le64 hdr_addr; /* Header buffer address */
++ __le64 pkt_addr; /* Packet buffer address */
++ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+- struct {
+- __le16 pkt_info; /* RSS type, Packet type */
+- __le16 hdr_info; /* Split Head, buf len */
++ union {
++ __le32 data;
++ struct {
++ __le16 pkt_info; /*RSS type, Pkt type*/
++ /* Split Header, header buffer len */
++ __le16 hdr_info;
++ } hs_rss;
+ } lo_dword;
+ union {
+- __le32 rss; /* RSS Hash */
++ __le32 rss; /* RSS Hash */
+ struct {
+- __le16 ip_id; /* IP id */
+- __le16 csum; /* Packet Checksum */
++ __le16 ip_id; /* IP id */
++ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+- __le32 status_error; /* ext status/error */
+- __le16 length; /* Packet length */
+- __le16 vlan; /* VLAN tag */
++ __le32 status_error; /* ext status/error */
++ __le16 length; /* Packet length */
++ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+ };
+
+-#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
+-#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
+-#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+-#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
++#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F
++#define E1000_RXDADV_RSSTYPE_SHIFT 12
++#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
++#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
++#define E1000_RXDADV_SPLITHEADER_EN 0x00001000
++#define E1000_RXDADV_SPH 0x8000
++#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
++#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
++#define E1000_RXDADV_ERR_HBO 0x00800000
++
++/* RSS Hash results */
++#define E1000_RXDADV_RSSTYPE_NONE 0x00000000
++#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
++#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002
++#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
++#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004
++#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005
++#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
++#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
++#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
++#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
++
++/* RSS Packet Types as indicated in the receive descriptor */
++#define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0
++#define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00
++#define E1000_RXDADV_PKTTYPE_NONE 0x00000000
++#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
++#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */
++#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */
++#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */
++#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
++#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
++#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
++#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
++
++#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
++#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
++#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
++#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
++#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
++#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
++
++/* LinkSec results */
++/* Security Processing bit Indication */
++#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000
++#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
++#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
++#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
++#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
++
++#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000
++#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
++#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
++#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
++#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
+
+ /* Transmit Descriptor - Advanced */
+ union e1000_adv_tx_desc {
+@@ -127,16 +281,26 @@
+ };
+
+ /* Adv Transmit Descriptor Config Masks */
+-#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
+-#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+-#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+-#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
+-#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+-#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
+-#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+-#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
+-#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+-#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
++#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
++#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
++#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
++#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
++#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
++#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
++#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
++#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
++#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
++#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */
++#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */
++#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */
++#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
++#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
++#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
++#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
++/* 1st & Last TSO-full iSCSI PDU*/
++#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800
++#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
++#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+
+ /* Context descriptors */
+ struct e1000_adv_tx_context_desc {
+@@ -146,127 +310,174 @@
+ __le32 mss_l4len_idx;
+ };
+
+-#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+-#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+-#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+-#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
++#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
++#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
++#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
++#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
++#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
++#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
++#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
++#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
+ /* IPSec Encrypt Enable for ESP */
+-#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+-#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
++#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
++/* Req requires Markers and CRC */
++#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000
++#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
++#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+ /* Adv ctxt IPSec SA IDX mask */
++#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF
+ /* Adv ctxt IPSec ESP len mask */
++#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF
+
+ /* Additional Transmit Descriptor Control definitions */
+-#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
++#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
++#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */
+ /* Tx Queue Arbitration Priority 0=low, 1=high */
++#define E1000_TXDCTL_PRIORITY 0x08000000
+
+ /* Additional Receive Descriptor Control definitions */
+-#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
++#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
++#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */
+
+ /* Direct Cache Access (DCA) definitions */
+-#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */
+-#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
++#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
++#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+-#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+-#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+-#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
+-#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+-#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
+-
+-#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+-#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+-#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+-#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+-
+-/* Additional DCA related definitions, note change in position of CPUID */
+-#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
+-#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
+-#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
+-#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
++#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
++#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
++
++#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
++#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
++#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */
++#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */
++#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */
++
++#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
++#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
++#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
++#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
++#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
++
++#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
++#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
++#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
++#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
++
++/* Additional interrupt register bit definitions */
++#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */
++#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
++#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
+
+ /* ETQF register bit definitions */
+-#define E1000_ETQF_FILTER_ENABLE (1 << 26)
+-#define E1000_ETQF_1588 (1 << 30)
++#define E1000_ETQF_FILTER_ENABLE (1 << 26)
++#define E1000_ETQF_IMM_INT (1 << 29)
++#define E1000_ETQF_1588 (1 << 30)
++#define E1000_ETQF_QUEUE_ENABLE (1 << 31)
++/*
++ * ETQF filter list: one static filter per filter consumer. This is
++ * to avoid filter collisions later. Add new filters
++ * here!!
++ *
++ * Current filters:
++ * EAPOL 802.1x (0x888e): Filter 0
++ */
++#define E1000_ETQF_FILTER_EAPOL 0
+
+-/* FTQF register bit definitions */
+-#define E1000_FTQF_VF_BP 0x00008000
+-#define E1000_FTQF_1588_TIME_STAMP 0x08000000
+-#define E1000_FTQF_MASK 0xF0000000
+-#define E1000_FTQF_MASK_PROTO_BP 0x10000000
+-#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+-
+-#define E1000_NVM_APME_82575 0x0400
+-#define MAX_NUM_VFS 8
+-
+-#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */
+-#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
+-#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
+-#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
+-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
++#define E1000_FTQF_VF_BP 0x00008000
++#define E1000_FTQF_1588_TIME_STAMP 0x08000000
++#define E1000_FTQF_MASK 0xF0000000
++#define E1000_FTQF_MASK_PROTO_BP 0x10000000
++#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
++#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000
++#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
++
++#define E1000_NVM_APME_82575 0x0400
++#define MAX_NUM_VFS 7
++
++#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */
++#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */
++#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
++#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
++#define E1000_DTXSWC_LLE_SHIFT 16
++#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
+
+ /* Easy defines for setting default pool, would normally be left a zero */
+-#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
+-#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
++#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
++#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
+
+ /* Other useful VMD_CTL register defines */
+-#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
+-#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
+-#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
++#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
++#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
++#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
+
+ /* Per VM Offload register setup */
+-#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
+-#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
+-#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
+-#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
+-#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
+-#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
+-#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
+-#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
+-#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+-#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+-
+-#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */
+-#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+-#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+-
+-#define E1000_VLVF_ARRAY_SIZE 32
+-#define E1000_VLVF_VLANID_MASK 0x00000FFF
+-#define E1000_VLVF_POOLSEL_SHIFT 12
+-#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
+-#define E1000_VLVF_LVLAN 0x00100000
+-#define E1000_VLVF_VLANID_ENABLE 0x80000000
+-
+-#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+-#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+-
+-#define E1000_IOVCTL 0x05BBC
+-#define E1000_IOVCTL_REUSE_VFQ 0x00000001
+-
+-#define E1000_RPLOLR_STRVLAN 0x40000000
+-#define E1000_RPLOLR_STRCRC 0x80000000
+-
+-#define E1000_DTXCTL_8023LL 0x0004
+-#define E1000_DTXCTL_VLAN_ADDED 0x0008
+-#define E1000_DTXCTL_OOS_ENABLE 0x0010
+-#define E1000_DTXCTL_MDP_EN 0x0020
+-#define E1000_DTXCTL_SPOOF_INT 0x0040
++#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
++#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
++#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
++#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
++#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
++#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
++#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
++#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
++#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
++#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
++
++#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */
++#define E1000_VMOLR_UPE 0x20000000 /* Unicast promisuous enable */
++#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */
++#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
++#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
++
++#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */
++#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */
++
++#define E1000_VLVF_ARRAY_SIZE 32
++#define E1000_VLVF_VLANID_MASK 0x00000FFF
++#define E1000_VLVF_POOLSEL_SHIFT 12
++#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
++#define E1000_VLVF_LVLAN 0x00100000
++#define E1000_VLVF_VLANID_ENABLE 0x80000000
++
++#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
++#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
++
++#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
++
++#define E1000_IOVCTL 0x05BBC
++#define E1000_IOVCTL_REUSE_VFQ 0x00000001
++
++#define E1000_RPLOLR_STRVLAN 0x40000000
++#define E1000_RPLOLR_STRCRC 0x80000000
++
++#define E1000_TCTL_EXT_COLD 0x000FFC00
++#define E1000_TCTL_EXT_COLD_SHIFT 10
++
++#define E1000_DTXCTL_8023LL 0x0004
++#define E1000_DTXCTL_VLAN_ADDED 0x0008
++#define E1000_DTXCTL_OOS_ENABLE 0x0010
++#define E1000_DTXCTL_MDP_EN 0x0020
++#define E1000_DTXCTL_SPOOF_INT 0x0040
+
+ #define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
+
+-#define ALL_QUEUES 0xFFFF
+-
+-/* RX packet buffer size defines */
+-#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
+-void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
+-void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
+-void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
+-u16 igb_rxpbs_adjust_82580(u32 data);
+-s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
+-s32 igb_set_eee_i350(struct e1000_hw *);
+-s32 igb_set_eee_i354(struct e1000_hw *);
+-s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status);
++#define ALL_QUEUES 0xFFFF
+
++/* Rx packet buffer size defines */
++#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
++void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
++void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
++void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
++s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
++s32 e1000_init_hw_82575(struct e1000_hw *hw);
++
++void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
++u16 e1000_rxpbs_adjust_82580(u32 data);
++s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
++s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M);
++s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M);
++s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *);
++s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw);
++s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw);
+ #define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
+ #define E1000_EMC_INTERNAL_DATA 0x00
+ #define E1000_EMC_INTERNAL_THERM_LIMIT 0x20
+@@ -276,4 +487,26 @@
+ #define E1000_EMC_DIODE2_THERM_LIMIT 0x1A
+ #define E1000_EMC_DIODE3_DATA 0x2A
+ #define E1000_EMC_DIODE3_THERM_LIMIT 0x30
+-#endif
++
++s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw);
++s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw);
++
++/* I2C SDA and SCL timing parameters for standard mode */
++#define E1000_I2C_T_HD_STA 4
++#define E1000_I2C_T_LOW 5
++#define E1000_I2C_T_HIGH 4
++#define E1000_I2C_T_SU_STA 5
++#define E1000_I2C_T_HD_DATA 5
++#define E1000_I2C_T_SU_DATA 1
++#define E1000_I2C_T_RISE 1
++#define E1000_I2C_T_FALL 1
++#define E1000_I2C_T_SU_STO 4
++#define E1000_I2C_T_BUF 5
++
++s32 e1000_set_i2c_bb(struct e1000_hw *hw);
++s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
++ u8 dev_addr, u8 *data);
++s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
++ u8 dev_addr, u8 data);
++void e1000_i2c_bus_clear(struct e1000_hw *hw);
++#endif /* _E1000_82575_H_ */
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_api.c b/drivers/net/ethernet/intel/igb/e1000_api.c
+--- a/drivers/net/ethernet/intel/igb/e1000_api.c 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_api.c 2016-11-14 14:32:08.579567168 +0000
+@@ -0,0 +1,1184 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include "e1000_api.h"
++
++/**
++ * e1000_init_mac_params - Initialize MAC function pointers
++ * @hw: pointer to the HW structure
++ *
++ * This function initializes the function pointers for the MAC
++ * set of functions. Called by drivers or by e1000_setup_init_funcs.
++ **/
++s32 e1000_init_mac_params(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ if (hw->mac.ops.init_params) {
++ ret_val = hw->mac.ops.init_params(hw);
++ if (ret_val) {
++ DEBUGOUT("MAC Initialization Error\n");
++ goto out;
++ }
++ } else {
++ DEBUGOUT("mac.init_mac_params was NULL\n");
++ ret_val = -E1000_ERR_CONFIG;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_init_nvm_params - Initialize NVM function pointers
++ * @hw: pointer to the HW structure
++ *
++ * This function initializes the function pointers for the NVM
++ * set of functions. Called by drivers or by e1000_setup_init_funcs.
++ **/
++s32 e1000_init_nvm_params(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ if (hw->nvm.ops.init_params) {
++ ret_val = hw->nvm.ops.init_params(hw);
++ if (ret_val) {
++ DEBUGOUT("NVM Initialization Error\n");
++ goto out;
++ }
++ } else {
++ DEBUGOUT("nvm.init_nvm_params was NULL\n");
++ ret_val = -E1000_ERR_CONFIG;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_init_phy_params - Initialize PHY function pointers
++ * @hw: pointer to the HW structure
++ *
++ * This function initializes the function pointers for the PHY
++ * set of functions. Called by drivers or by e1000_setup_init_funcs.
++ **/
++s32 e1000_init_phy_params(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ if (hw->phy.ops.init_params) {
++ ret_val = hw->phy.ops.init_params(hw);
++ if (ret_val) {
++ DEBUGOUT("PHY Initialization Error\n");
++ goto out;
++ }
++ } else {
++ DEBUGOUT("phy.init_phy_params was NULL\n");
++ ret_val = -E1000_ERR_CONFIG;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_init_mbx_params - Initialize mailbox function pointers
++ * @hw: pointer to the HW structure
++ *
++ * This function initializes the function pointers for the PHY
++ * set of functions. Called by drivers or by e1000_setup_init_funcs.
++ **/
++s32 e1000_init_mbx_params(struct e1000_hw *hw)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ if (hw->mbx.ops.init_params) {
++ ret_val = hw->mbx.ops.init_params(hw);
++ if (ret_val) {
++ DEBUGOUT("Mailbox Initialization Error\n");
++ goto out;
++ }
++ } else {
++ DEBUGOUT("mbx.init_mbx_params was NULL\n");
++ ret_val = -E1000_ERR_CONFIG;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * igb_e1000_set_mac_type - Sets MAC type
++ * @hw: pointer to the HW structure
++ *
++ * This function sets the mac type of the adapter based on the
++ * device ID stored in the hw structure.
++ * MUST BE FIRST FUNCTION CALLED (explicitly or through
++ * e1000_setup_init_funcs()).
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_set_mac_type(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("igb_e1000_set_mac_type");
++
++ switch (hw->device_id) {
++ case E1000_DEV_ID_82575EB_COPPER:
++ case E1000_DEV_ID_82575EB_FIBER_SERDES:
++ case E1000_DEV_ID_82575GB_QUAD_COPPER:
++ mac->type = e1000_82575;
++ break;
++ case E1000_DEV_ID_82576:
++ case E1000_DEV_ID_82576_FIBER:
++ case E1000_DEV_ID_82576_SERDES:
++ case E1000_DEV_ID_82576_QUAD_COPPER:
++ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
++ case E1000_DEV_ID_82576_NS:
++ case E1000_DEV_ID_82576_NS_SERDES:
++ case E1000_DEV_ID_82576_SERDES_QUAD:
++ mac->type = e1000_82576;
++ break;
++ case E1000_DEV_ID_82580_COPPER:
++ case E1000_DEV_ID_82580_FIBER:
++ case E1000_DEV_ID_82580_SERDES:
++ case E1000_DEV_ID_82580_SGMII:
++ case E1000_DEV_ID_82580_COPPER_DUAL:
++ case E1000_DEV_ID_82580_QUAD_FIBER:
++ case E1000_DEV_ID_DH89XXCC_SGMII:
++ case E1000_DEV_ID_DH89XXCC_SERDES:
++ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
++ case E1000_DEV_ID_DH89XXCC_SFP:
++ mac->type = e1000_82580;
++ break;
++ case E1000_DEV_ID_I350_COPPER:
++ case E1000_DEV_ID_I350_FIBER:
++ case E1000_DEV_ID_I350_SERDES:
++ case E1000_DEV_ID_I350_SGMII:
++ case E1000_DEV_ID_I350_DA4:
++ mac->type = e1000_i350;
++ break;
++ case E1000_DEV_ID_I210_COPPER_FLASHLESS:
++ case E1000_DEV_ID_I210_SERDES_FLASHLESS:
++ case E1000_DEV_ID_I210_COPPER:
++ case E1000_DEV_ID_I210_COPPER_OEM1:
++ case E1000_DEV_ID_I210_COPPER_IT:
++ case E1000_DEV_ID_I210_FIBER:
++ case E1000_DEV_ID_I210_SERDES:
++ case E1000_DEV_ID_I210_SGMII:
++ mac->type = e1000_i210;
++ break;
++ case E1000_DEV_ID_I211_COPPER:
++ mac->type = e1000_i211;
++ break;
++
++ case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
++ case E1000_DEV_ID_I354_SGMII:
++ case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
++ mac->type = e1000_i354;
++ break;
++ default:
++ /* Should never have loaded on this device */
++ ret_val = -E1000_ERR_MAC_INIT;
++ break;
++ }
++
++ return ret_val;
++}
++
++/**
++ * e1000_setup_init_funcs - Initializes function pointers
++ * @hw: pointer to the HW structure
++ * @init_device: true will initialize the rest of the function pointers
++ * getting the device ready for use. false will only set
++ * MAC type and the function pointers for the other init
++ * functions. Passing false will not generate any hardware
++ * reads or writes.
++ *
++ * This function must be called by a driver in order to use the rest
++ * of the 'shared' code files. Called by drivers only.
++ **/
++s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
++{
++ s32 ret_val;
++
++ /* Can't do much good without knowing the MAC type. */
++ ret_val = igb_e1000_set_mac_type(hw);
++ if (ret_val) {
++ DEBUGOUT("ERROR: MAC type could not be set properly.\n");
++ goto out;
++ }
++
++ if (!hw->hw_addr) {
++ DEBUGOUT("ERROR: Registers not mapped\n");
++ ret_val = -E1000_ERR_CONFIG;
++ goto out;
++ }
++
++ /*
++ * Init function pointers to generic implementations. We do this first
++ * allowing a driver module to override it afterward.
++ */
++ e1000_init_mac_ops_generic(hw);
++ e1000_init_phy_ops_generic(hw);
++ e1000_init_nvm_ops_generic(hw);
++ e1000_init_mbx_ops_generic(hw);
++
++ /*
++ * Set up the init function pointers. These are functions within the
++ * adapter family file that sets up function pointers for the rest of
++ * the functions in that family.
++ */
++ switch (hw->mac.type) {
++ case e1000_82575:
++ case e1000_82576:
++ case e1000_82580:
++ case e1000_i350:
++ case e1000_i354:
++ e1000_init_function_pointers_82575(hw);
++ break;
++ case e1000_i210:
++ case e1000_i211:
++ e1000_init_function_pointers_i210(hw);
++ break;
++ default:
++ DEBUGOUT("Hardware not supported\n");
++ ret_val = -E1000_ERR_CONFIG;
++ break;
++ }
++
++ /*
++ * Initialize the rest of the function pointers. These require some
++ * register reads/writes in some cases.
++ */
++ if (!(ret_val) && init_device) {
++ ret_val = e1000_init_mac_params(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = e1000_init_nvm_params(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = e1000_init_phy_params(hw);
++ if (ret_val)
++ goto out;
++
++ ret_val = e1000_init_mbx_params(hw);
++ if (ret_val)
++ goto out;
++ }
++
++out:
++ return ret_val;
++}
++
++/**
++ * igb_e1000_get_bus_info - Obtain bus information for adapter
++ * @hw: pointer to the HW structure
++ *
++ * This will obtain information about the HW bus for which the
++ * adapter is attached and stores it in the hw structure. This is a
++ * function pointer entry point called by drivers.
++ **/
++
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_get_bus_info(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.get_bus_info)
++ return hw->mac.ops.get_bus_info(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_clear_vfta - Clear VLAN filter table
++ * @hw: pointer to the HW structure
++ *
++ * This clears the VLAN filter table on the adapter. This is a function
++ * pointer entry point called by drivers.
++ **/
++void e1000_clear_vfta(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.clear_vfta)
++ hw->mac.ops.clear_vfta(hw);
++}
++
++/**
++ * igb_e1000_write_vfta - Write value to VLAN filter table
++ * @hw: pointer to the HW structure
++ * @offset: the 32-bit offset in which to write the value to.
++ * @value: the 32-bit value to write at location offset.
++ *
++ * This writes a 32-bit value to a 32-bit offset in the VLAN filter
++ * table. This is a function pointer entry point called by drivers.
++ **/
++/* Changed name, duplicated with e1000 */
++void igb_e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
++{
++ if (hw->mac.ops.write_vfta)
++ hw->mac.ops.write_vfta(hw, offset, value);
++}
++
++/**
++ * e1000_update_mc_addr_list - Update Multicast addresses
++ * @hw: pointer to the HW structure
++ * @mc_addr_list: array of multicast addresses to program
++ * @mc_addr_count: number of multicast addresses to program
++ *
++ * Updates the Multicast Table Array.
++ * The caller must have a packed mc_addr_list of multicast addresses.
++ **/
++void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
++ u32 mc_addr_count)
++{
++ if (hw->mac.ops.update_mc_addr_list)
++ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
++ mc_addr_count);
++}
++
++/**
++ * igb_e1000_force_mac_fc - Force MAC flow control
++ * @hw: pointer to the HW structure
++ *
++ * Force the MAC's flow control settings. Currently no func pointer exists
++ * and all implementations are handled in the generic version of this
++ * function.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_force_mac_fc(struct e1000_hw *hw)
++{
++ return e1000_force_mac_fc_generic(hw);
++}
++
++/**
++ * igb_e1000_check_for_link - Check/Store link connection
++ * @hw: pointer to the HW structure
++ *
++ * This checks the link condition of the adapter and stores the
++ * results in the hw->mac structure. This is a function pointer entry
++ * point called by drivers.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_check_for_link(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.check_for_link)
++ return hw->mac.ops.check_for_link(hw);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * e1000_check_mng_mode - Check management mode
++ * @hw: pointer to the HW structure
++ *
++ * This checks if the adapter has manageability enabled.
++ * This is a function pointer entry point called by drivers.
++ **/
++bool e1000_check_mng_mode(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.check_mng_mode)
++ return hw->mac.ops.check_mng_mode(hw);
++
++ return false;
++}
++
++/**
++ * e1000_mng_write_dhcp_info - Writes DHCP info to host interface
++ * @hw: pointer to the HW structure
++ * @buffer: pointer to the host interface
++ * @length: size of the buffer
++ *
++ * Writes the DHCP information to the host interface.
++ **/
++s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
++{
++ return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
++}
++
++/**
++ * igb_e1000_reset_hw - Reset hardware
++ * @hw: pointer to the HW structure
++ *
++ * This resets the hardware into a known state. This is a function pointer
++ * entry point called by drivers.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_reset_hw(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.reset_hw)
++ return hw->mac.ops.reset_hw(hw);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * igb_e1000_init_hw - Initialize hardware
++ * @hw: pointer to the HW structure
++ *
++ * This inits the hardware readying it for operation. This is a function
++ * pointer entry point called by drivers.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_init_hw(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.init_hw)
++ return hw->mac.ops.init_hw(hw);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * igb_e1000_setup_link - Configures link and flow control
++ * @hw: pointer to the HW structure
++ *
++ * This configures link and flow control settings for the adapter. This
++ * is a function pointer entry point called by drivers. While modules can
++ * also call this, they probably call their own version of this function.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_setup_link(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.setup_link)
++ return hw->mac.ops.setup_link(hw);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * igb_e1000_get_speed_and_duplex - Returns current speed and duplex
++ * @hw: pointer to the HW structure
++ * @speed: pointer to a 16-bit value to store the speed
++ * @duplex: pointer to a 16-bit value to store the duplex.
++ *
++ * This returns the speed and duplex of the adapter in the two 'out'
++ * variables passed in. This is a function pointer entry point called
++ * by drivers.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
++{
++ if (hw->mac.ops.get_link_up_info)
++ return hw->mac.ops.get_link_up_info(hw, speed, duplex);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * igb_e1000_setup_led - Configures SW controllable LED
++ * @hw: pointer to the HW structure
++ *
++ * This prepares the SW controllable LED for use and saves the current state
++ * of the LED so it can be later restored. This is a function pointer entry
++ * point called by drivers.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_setup_led(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.setup_led)
++ return hw->mac.ops.setup_led(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * igb_e1000_cleanup_led - Restores SW controllable LED
++ * @hw: pointer to the HW structure
++ *
++ * This restores the SW controllable LED to the value saved off by
++ * igb_e1000_setup_led. This is a function pointer entry point called by drivers.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_cleanup_led(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.cleanup_led)
++ return hw->mac.ops.cleanup_led(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_blink_led - Blink SW controllable LED
++ * @hw: pointer to the HW structure
++ *
++ * This starts the adapter LED blinking. Request the LED to be setup first
++ * and cleaned up after. This is a function pointer entry point called by
++ * drivers.
++ **/
++s32 e1000_blink_led(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.blink_led)
++ return hw->mac.ops.blink_led(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_id_led_init - store LED configurations in SW
++ * @hw: pointer to the HW structure
++ *
++ * Initializes the LED config in SW. This is a function pointer entry point
++ * called by drivers.
++ **/
++s32 e1000_id_led_init(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.id_led_init)
++ return hw->mac.ops.id_led_init(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * igb_e1000_led_on - Turn on SW controllable LED
++ * @hw: pointer to the HW structure
++ *
++ * Turns the SW defined LED on. This is a function pointer entry point
++ * called by drivers.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_led_on(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.led_on)
++ return hw->mac.ops.led_on(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * igb_e1000_led_off - Turn off SW controllable LED
++ * @hw: pointer to the HW structure
++ *
++ * Turns the SW defined LED off. This is a function pointer entry point
++ * called by drivers.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_led_off(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.led_off)
++ return hw->mac.ops.led_off(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * igb_e1000_reset_adaptive - Reset adaptive IFS
++ * @hw: pointer to the HW structure
++ *
++ * Resets the adaptive IFS. Currently no func pointer exists and all
++ * implementations are handled in the generic version of this function.
++ **/
++/* Changed name, duplicated with e1000 */
++void igb_e1000_reset_adaptive(struct e1000_hw *hw)
++{
++ e1000_reset_adaptive_generic(hw);
++}
++
++/**
++ * igb_e1000_update_adaptive - Update adaptive IFS
++ * @hw: pointer to the HW structure
++ *
++ * Updates adapter IFS. Currently no func pointer exists and all
++ * implementations are handled in the generic version of this function.
++ **/
++/* Changed name, duplicated with e1000 */
++void igb_e1000_update_adaptive(struct e1000_hw *hw)
++{
++ e1000_update_adaptive_generic(hw);
++}
++
++/**
++ * e1000_disable_pcie_master - Disable PCI-Express master access
++ * @hw: pointer to the HW structure
++ *
++ * Disables PCI-Express master access and verifies there are no pending
++ * requests. Currently no func pointer exists and all implementations are
++ * handled in the generic version of this function.
++ **/
++s32 e1000_disable_pcie_master(struct e1000_hw *hw)
++{
++ return e1000_disable_pcie_master_generic(hw);
++}
++
++/**
++ * igb_e1000_config_collision_dist - Configure collision distance
++ * @hw: pointer to the HW structure
++ *
++ * Configures the collision distance to the default value and is used
++ * during link setup.
++ **/
++/* Changed name, duplicated with e1000 */
++void igb_e1000_config_collision_dist(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.config_collision_dist)
++ hw->mac.ops.config_collision_dist(hw);
++}
++
++/**
++ * igb_e1000_rar_set - Sets a receive address register
++ * @hw: pointer to the HW structure
++ * @addr: address to set the RAR to
++ * @index: the RAR to set
++ *
++ * Sets a Receive Address Register (RAR) to the specified address.
++ **/
++/* Changed name, duplicated with e1000 */
++int igb_e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
++{
++ if (hw->mac.ops.rar_set)
++ return hw->mac.ops.rar_set(hw, addr, index);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * igb_e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
++ * @hw: pointer to the HW structure
++ *
++ * Ensures that the MDI/MDIX SW state is valid.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_validate_mdi_setting(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.validate_mdi_setting)
++ return hw->mac.ops.validate_mdi_setting(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * igb_e1000_hash_mc_addr - Determines address location in multicast table
++ * @hw: pointer to the HW structure
++ * @mc_addr: Multicast address to hash.
++ *
++ * This hashes an address to determine its location in the multicast
++ * table. Currently no func pointer exists and all implementations
++ * are handled in the generic version of this function.
++ **/
++/* Changed name, duplicated with e1000 */
++u32 igb_e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
++{
++ return e1000_hash_mc_addr_generic(hw, mc_addr);
++}
++
++/**
++ * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
++ * @hw: pointer to the HW structure
++ *
++ * Enables packet filtering on transmit packets if manageability is enabled
++ * and host interface is enabled.
++ * Currently no func pointer exists and all implementations are handled in the
++ * generic version of this function.
++ **/
++bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
++{
++ return e1000_enable_tx_pkt_filtering_generic(hw);
++}
++
++/**
++ * e1000_mng_host_if_write - Writes to the manageability host interface
++ * @hw: pointer to the HW structure
++ * @buffer: pointer to the host interface buffer
++ * @length: size of the buffer
++ * @offset: location in the buffer to write to
++ * @sum: sum of the data (not checksum)
++ *
++ * This function writes the buffer content at the offset given on the host if.
++ * It also does alignment considerations to do the writes in most efficient
++ * way. Also fills up the sum of the buffer in the *sum parameter.
++ **/
++s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
++ u16 offset, u8 *sum)
++{
++ return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum);
++}
++
++/**
++ * e1000_mng_write_cmd_header - Writes manageability command header
++ * @hw: pointer to the HW structure
++ * @hdr: pointer to the host interface command header
++ *
++ * Writes the command header after does the checksum calculation.
++ **/
++s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
++ struct e1000_host_mng_command_header *hdr)
++{
++ return e1000_mng_write_cmd_header_generic(hw, hdr);
++}
++
++/**
++ * e1000_mng_enable_host_if - Checks host interface is enabled
++ * @hw: pointer to the HW structure
++ *
++ * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
++ *
++ * This function checks whether the HOST IF is enabled for command operation
++ * and also checks whether the previous command is completed. It busy waits
++ * in case of previous command is not completed.
++ **/
++s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
++{
++ return e1000_mng_enable_host_if_generic(hw);
++}
++
++/**
++ * e1000_check_reset_block - Verifies PHY can be reset
++ * @hw: pointer to the HW structure
++ *
++ * Checks if the PHY is in a state that can be reset or if manageability
++ * has it tied up. This is a function pointer entry point called by drivers.
++ **/
++s32 e1000_check_reset_block(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.check_reset_block)
++ return hw->phy.ops.check_reset_block(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * igb_e1000_read_phy_reg - Reads PHY register
++ * @hw: pointer to the HW structure
++ * @offset: the register to read
++ * @data: the buffer to store the 16-bit read.
++ *
++ * Reads the PHY register and returns the value in data.
++ * This is a function pointer entry point called by drivers.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ if (hw->phy.ops.read_reg)
++ return hw->phy.ops.read_reg(hw, offset, data);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * igb_e1000_write_phy_reg - Writes PHY register
++ * @hw: pointer to the HW structure
++ * @offset: the register to write
++ * @data: the value to write.
++ *
++ * Writes the PHY register at offset with the value in data.
++ * This is a function pointer entry point called by drivers.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ if (hw->phy.ops.write_reg)
++ return hw->phy.ops.write_reg(hw, offset, data);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_release_phy - Generic release PHY
++ * @hw: pointer to the HW structure
++ *
++ * Return if silicon family does not require a semaphore when accessing the
++ * PHY.
++ **/
++void e1000_release_phy(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.release)
++ hw->phy.ops.release(hw);
++}
++
++/**
++ * e1000_acquire_phy - Generic acquire PHY
++ * @hw: pointer to the HW structure
++ *
++ * Return success if silicon family does not require a semaphore when
++ * accessing the PHY.
++ **/
++s32 e1000_acquire_phy(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.acquire)
++ return hw->phy.ops.acquire(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_read_kmrn_reg - Reads register using Kumeran interface
++ * @hw: pointer to the HW structure
++ * @offset: the register to read
++ * @data: the location to store the 16-bit value read.
++ *
++ * Reads a register out of the Kumeran interface. Currently no func pointer
++ * exists and all implementations are handled in the generic version of
++ * this function.
++ **/
++s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ return e1000_read_kmrn_reg_generic(hw, offset, data);
++}
++
++/**
++ * e1000_write_kmrn_reg - Writes register using Kumeran interface
++ * @hw: pointer to the HW structure
++ * @offset: the register to write
++ * @data: the value to write.
++ *
++ * Writes a register to the Kumeran interface. Currently no func pointer
++ * exists and all implementations are handled in the generic version of
++ * this function.
++ **/
++s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ return e1000_write_kmrn_reg_generic(hw, offset, data);
++}
++
++/**
++ * e1000_get_cable_length - Retrieves cable length estimation
++ * @hw: pointer to the HW structure
++ *
++ * This function estimates the cable length and stores them in
++ * hw->phy.min_length and hw->phy.max_length. This is a function pointer
++ * entry point called by drivers.
++ **/
++s32 e1000_get_cable_length(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.get_cable_length)
++ return hw->phy.ops.get_cable_length(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_get_phy_info - Retrieves PHY information from registers
++ * @hw: pointer to the HW structure
++ *
++ * This function gets some information from various PHY registers and
++ * populates hw->phy values with it. This is a function pointer entry
++ * point called by drivers.
++ **/
++s32 e1000_get_phy_info(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.get_info)
++ return hw->phy.ops.get_info(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * igb_e1000_phy_hw_reset - Hard PHY reset
++ * @hw: pointer to the HW structure
++ *
++ * Performs a hard PHY reset. This is a function pointer entry point called
++ * by drivers.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_phy_hw_reset(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.reset)
++ return hw->phy.ops.reset(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_phy_commit - Soft PHY reset
++ * @hw: pointer to the HW structure
++ *
++ * Performs a soft PHY reset on those that apply. This is a function pointer
++ * entry point called by drivers.
++ **/
++s32 e1000_phy_commit(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.commit)
++ return hw->phy.ops.commit(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_set_d0_lplu_state - Sets low power link up state for D0
++ * @hw: pointer to the HW structure
++ * @active: boolean used to enable/disable lplu
++ *
++ * Success returns 0, Failure returns 1
++ *
++ * The low power link up (lplu) state is set to the power management level D0
++ * and SmartSpeed is disabled when active is true, else clear lplu for D0
++ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
++ * is used during Dx states where the power conservation is most important.
++ * During driver activity, SmartSpeed should be enabled so performance is
++ * maintained. This is a function pointer entry point called by drivers.
++ **/
++s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
++{
++ if (hw->phy.ops.set_d0_lplu_state)
++ return hw->phy.ops.set_d0_lplu_state(hw, active);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_set_d3_lplu_state - Sets low power link up state for D3
++ * @hw: pointer to the HW structure
++ * @active: boolean used to enable/disable lplu
++ *
++ * Success returns 0, Failure returns 1
++ *
++ * The low power link up (lplu) state is set to the power management level D3
++ * and SmartSpeed is disabled when active is true, else clear lplu for D3
++ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
++ * is used during Dx states where the power conservation is most important.
++ * During driver activity, SmartSpeed should be enabled so performance is
++ * maintained. This is a function pointer entry point called by drivers.
++ **/
++s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
++{
++ if (hw->phy.ops.set_d3_lplu_state)
++ return hw->phy.ops.set_d3_lplu_state(hw, active);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * igb_e1000_read_mac_addr - Reads MAC address
++ * @hw: pointer to the HW structure
++ *
++ * Reads the MAC address out of the adapter and stores it in the HW structure.
++ * Currently no func pointer exists and all implementations are handled in the
++ * generic version of this function.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_read_mac_addr(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.read_mac_addr)
++ return hw->mac.ops.read_mac_addr(hw);
++
++ return igb_e1000_read_mac_addr_generic(hw);
++}
++
++/**
++ * e1000_read_pba_string - Read device part number string
++ * @hw: pointer to the HW structure
++ * @pba_num: pointer to device part number
++ * @pba_num_size: size of part number buffer
++ *
++ * Reads the product board assembly (PBA) number from the EEPROM and stores
++ * the value in pba_num.
++ * Currently no func pointer exists and all implementations are handled in the
++ * generic version of this function.
++ **/
++s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size)
++{
++ return igb_e1000_read_pba_string_generic(hw, pba_num, pba_num_size);
++}
++
++/**
++ * e1000_read_pba_length - Read device part number string length
++ * @hw: pointer to the HW structure
++ * @pba_num_size: size of part number buffer
++ *
++ * Reads the product board assembly (PBA) number length from the EEPROM and
++ * stores the value in pba_num_size.
++ * Currently no func pointer exists and all implementations are handled in the
++ * generic version of this function.
++ **/
++s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size)
++{
++ return e1000_read_pba_length_generic(hw, pba_num_size);
++}
++
++/**
++ * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
++ * @hw: pointer to the HW structure
++ *
++ * Validates the NVM checksum is correct. This is a function pointer entry
++ * point called by drivers.
++ **/
++s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
++{
++ if (hw->nvm.ops.validate)
++ return hw->nvm.ops.validate(hw);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
++ * @hw: pointer to the HW structure
++ *
++ * Updates the NVM checksum. Currently no func pointer exists and all
++ * implementations are handled in the generic version of this function.
++ **/
++s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
++{
++ if (hw->nvm.ops.update)
++ return hw->nvm.ops.update(hw);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * e1000_reload_nvm - Reloads EEPROM
++ * @hw: pointer to the HW structure
++ *
++ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
++ * extended control register.
++ **/
++void e1000_reload_nvm(struct e1000_hw *hw)
++{
++ if (hw->nvm.ops.reload)
++ hw->nvm.ops.reload(hw);
++}
++
++/**
++ * e1000_read_nvm - Reads NVM (EEPROM)
++ * @hw: pointer to the HW structure
++ * @offset: the word offset to read
++ * @words: number of 16-bit words to read
++ * @data: pointer to the properly sized buffer for the data.
++ *
++ * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
++ * pointer entry point called by drivers.
++ **/
++s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
++{
++ if (hw->nvm.ops.read)
++ return hw->nvm.ops.read(hw, offset, words, data);
++
++ return -E1000_ERR_CONFIG;
++}
++
++/**
++ * e1000_write_nvm - Writes to NVM (EEPROM)
++ * @hw: pointer to the HW structure
++ * @offset: the word offset to read
++ * @words: number of 16-bit words to write
++ * @data: pointer to the properly sized buffer for the data.
++ *
++ * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
++ * pointer entry point called by drivers.
++ **/
++s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
++{
++ if (hw->nvm.ops.write)
++ return hw->nvm.ops.write(hw, offset, words, data);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_write_8bit_ctrl_reg - Writes 8bit Control register
++ * @hw: pointer to the HW structure
++ * @reg: 32bit register offset
++ * @offset: the register to write
++ * @data: the value to write.
++ *
++ * Writes the PHY register at offset with the value in data.
++ * This is a function pointer entry point called by drivers.
++ **/
++s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
++ u8 data)
++{
++ return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
++}
++
++/**
++ * igb_e1000_power_up_phy - Restores link in case of PHY power down
++ * @hw: pointer to the HW structure
++ *
++ * The phy may be powered down to save power, to turn off link when the
++ * driver is unloaded, or wake on lan is not enabled (among others).
++ **/
++/* Changed name, duplicated with e1000 */
++void igb_e1000_power_up_phy(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.power_up)
++ hw->phy.ops.power_up(hw);
++
++ igb_e1000_setup_link(hw);
++}
++
++/**
++ * e1000_power_down_phy - Power down PHY
++ * @hw: pointer to the HW structure
++ *
++ * The phy may be powered down to save power, to turn off link when the
++ * driver is unloaded, or wake on lan is not enabled (among others).
++ **/
++void e1000_power_down_phy(struct e1000_hw *hw)
++{
++ if (hw->phy.ops.power_down)
++ hw->phy.ops.power_down(hw);
++}
++
++/**
++ * e1000_power_up_fiber_serdes_link - Power up serdes link
++ * @hw: pointer to the HW structure
++ *
++ * Power on the optics and PCS.
++ **/
++void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.power_up_serdes)
++ hw->mac.ops.power_up_serdes(hw);
++}
++
++/**
++ * e1000_shutdown_fiber_serdes_link - Remove link during power down
++ * @hw: pointer to the HW structure
++ *
++ * Shutdown the optics and PCS on driver unload.
++ **/
++void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.shutdown_serdes)
++ hw->mac.ops.shutdown_serdes(hw);
++}
++
++/**
++ * e1000_get_thermal_sensor_data - Gathers thermal sensor data
++ * @hw: pointer to hardware structure
++ *
++ * Updates the temperatures in mac.thermal_sensor_data
++ **/
++s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.get_thermal_sensor_data)
++ return hw->mac.ops.get_thermal_sensor_data(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_init_thermal_sensor_thresh - Sets thermal sensor thresholds
++ * @hw: pointer to hardware structure
++ *
++ * Sets the thermal sensor thresholds according to the NVM map
++ **/
++s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw)
++{
++ if (hw->mac.ops.init_thermal_sensor_thresh)
++ return hw->mac.ops.init_thermal_sensor_thresh(hw);
++
++ return E1000_SUCCESS;
++}
++
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_api.h b/drivers/net/ethernet/intel/igb/e1000_api.h
+--- a/drivers/net/ethernet/intel/igb/e1000_api.h 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_api.h 2016-11-14 14:32:08.579567168 +0000
+@@ -0,0 +1,152 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _E1000_API_H_
++#define _E1000_API_H_
++
++#include "e1000_hw.h"
++
++extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
++extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
++extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
++extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
++extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
++extern void e1000_init_function_pointers_i210(struct e1000_hw *hw);
++
++s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr);
++s32 igb_e1000_set_mac_type(struct e1000_hw *hw);
++s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
++s32 e1000_init_mac_params(struct e1000_hw *hw);
++s32 e1000_init_nvm_params(struct e1000_hw *hw);
++s32 e1000_init_phy_params(struct e1000_hw *hw);
++s32 e1000_init_mbx_params(struct e1000_hw *hw);
++s32 igb_e1000_get_bus_info(struct e1000_hw *hw);
++void e1000_clear_vfta(struct e1000_hw *hw);
++void igb_e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
++s32 igb_e1000_force_mac_fc(struct e1000_hw *hw);
++s32 igb_e1000_check_for_link(struct e1000_hw *hw);
++s32 igb_e1000_reset_hw(struct e1000_hw *hw);
++s32 igb_e1000_init_hw(struct e1000_hw *hw);
++s32 igb_e1000_setup_link(struct e1000_hw *hw);
++s32 igb_e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
++s32 e1000_disable_pcie_master(struct e1000_hw *hw);
++void igb_e1000_config_collision_dist(struct e1000_hw *hw);
++int igb_e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
++u32 igb_e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
++void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
++ u32 mc_addr_count);
++s32 igb_e1000_setup_led(struct e1000_hw *hw);
++s32 igb_e1000_cleanup_led(struct e1000_hw *hw);
++s32 e1000_check_reset_block(struct e1000_hw *hw);
++s32 e1000_blink_led(struct e1000_hw *hw);
++s32 igb_e1000_led_on(struct e1000_hw *hw);
++s32 igb_e1000_led_off(struct e1000_hw *hw);
++s32 e1000_id_led_init(struct e1000_hw *hw);
++void igb_e1000_reset_adaptive(struct e1000_hw *hw);
++void igb_e1000_update_adaptive(struct e1000_hw *hw);
++s32 e1000_get_cable_length(struct e1000_hw *hw);
++s32 igb_e1000_validate_mdi_setting(struct e1000_hw *hw);
++s32 igb_e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 igb_e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
++ u8 data);
++s32 e1000_get_phy_info(struct e1000_hw *hw);
++void e1000_release_phy(struct e1000_hw *hw);
++s32 e1000_acquire_phy(struct e1000_hw *hw);
++s32 igb_e1000_phy_hw_reset(struct e1000_hw *hw);
++s32 e1000_phy_commit(struct e1000_hw *hw);
++void igb_e1000_power_up_phy(struct e1000_hw *hw);
++void e1000_power_down_phy(struct e1000_hw *hw);
++s32 igb_e1000_read_mac_addr(struct e1000_hw *hw);
++s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size);
++s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
++void e1000_reload_nvm(struct e1000_hw *hw);
++s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
++s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
++s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
++s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
++s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
++s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
++bool e1000_check_mng_mode(struct e1000_hw *hw);
++bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
++s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
++s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
++ u16 offset, u8 *sum);
++s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
++ struct e1000_host_mng_command_header *hdr);
++s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
++s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw);
++s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw);
++
++/*
++ * TBI_ACCEPT macro definition:
++ *
++ * This macro requires:
++ * a = a pointer to struct e1000_hw
++ * status = the 8 bit status field of the Rx descriptor with EOP set
++ * errors = the 8 bit error field of the Rx descriptor with EOP set
++ * length = the sum of all the length fields of the Rx descriptors that
++ * make up the current frame
++ * last_byte = the last byte of the frame DMAed by the hardware
++ * min_frame_size = the minimum frame length we want to accept.
++ * max_frame_size = the maximum frame length we want to accept.
++ *
++ * This macro is a conditional that should be used in the interrupt
++ * handler's Rx processing routine when RxErrors have been detected.
++ *
++ * Typical use:
++ * ...
++ * if (TBI_ACCEPT) {
++ * accept_frame = true;
++ * e1000_tbi_adjust_stats(adapter, MacAddress);
++ * frame_length--;
++ * } else {
++ * accept_frame = false;
++ * }
++ * ...
++ */
++
++/* The carrier extension symbol, as received by the NIC. */
++#define CARRIER_EXTENSION 0x0F
++
++#define TBI_ACCEPT(a, status, errors, length, last_byte, \
++ min_frame_size, max_frame_size) \
++ (e1000_tbi_sbp_enabled_82543(a) && \
++ (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
++ ((last_byte) == CARRIER_EXTENSION) && \
++ (((status) & E1000_RXD_STAT_VP) ? \
++ (((length) > ((min_frame_size) - VLAN_TAG_SIZE)) && \
++ ((length) <= ((max_frame_size) + 1))) : \
++ (((length) > (min_frame_size)) && \
++ ((length) <= ((max_frame_size) + VLAN_TAG_SIZE + 1)))))
++
++#ifndef E1000_MAX
++#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b))
++#endif
++#ifndef E1000_DIVIDE_ROUND_UP
++#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */
++#endif
++#endif /* _E1000_API_H_ */
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
+--- a/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_defines.h 2016-11-14 14:32:08.579567168 +0000
+@@ -1,25 +1,26 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
+
+ #ifndef _E1000_DEFINES_H_
+ #define _E1000_DEFINES_H_
+@@ -30,38 +31,55 @@
+
+ /* Definitions for power management and wakeup registers */
+ /* Wake Up Control */
+-#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
++#define E1000_WUC_APME 0x00000001 /* APM Enable */
++#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
++#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
++#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
++#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
+
+ /* Wake Up Filter Control */
+-#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+-#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+-#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+-#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+-#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
++#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
++#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
++#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
++#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
++#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
++#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
++#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
++#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
++
++/* Wake Up Status */
++#define E1000_WUS_LNKC E1000_WUFC_LNKC
++#define E1000_WUS_MAG E1000_WUFC_MAG
++#define E1000_WUS_EX E1000_WUFC_EX
++#define E1000_WUS_MC E1000_WUFC_MC
++#define E1000_WUS_BC E1000_WUFC_BC
+
+ /* Extended Device Control */
+-#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */
+-#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */
+-#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */
+-#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
+-
++#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */
++#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */
++#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */
++#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */
++#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
++#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */
++#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */
++#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+ /* Physical Func Reset Done Indication */
+ #define E1000_CTRL_EXT_PFRSTD 0x00004000
+ #define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */
++#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
++#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
++#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */
+ #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
++/* Offset of the link mode field in Ctrl Ext register */
++#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22
+ #define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
+-#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+ #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+-#define E1000_CTRL_EXT_EIAME 0x01000000
++#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
++#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
++#define E1000_CTRL_EXT_EIAME 0x01000000
+ #define E1000_CTRL_EXT_IRCA 0x00000001
+-/* Interrupt delay cancellation */
+-/* Driver loaded bit for FW */
+-#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
+-/* Interrupt acknowledge Auto-mask */
+-/* Clear Interrupt timers after IMS clear */
+-/* packet buffer parity error detection enabled */
+-/* descriptor FIFO parity error detection enable */
++#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
++#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
+ #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+ #define E1000_CTRL_EXT_PHYPDEN 0x00100000
+ #define E1000_I2CCMD_REG_ADDR_SHIFT 16
+@@ -74,322 +92,446 @@
+ #define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a))
+ #define E1000_MAX_SGMII_PHY_REG_ADDR 255
+ #define E1000_I2CCMD_PHY_TIMEOUT 200
+-#define E1000_IVAR_VALID 0x80
+-#define E1000_GPIE_NSICR 0x00000001
+-#define E1000_GPIE_MSIX_MODE 0x00000010
+-#define E1000_GPIE_EIAME 0x40000000
+-#define E1000_GPIE_PBA 0x80000000
++#define E1000_IVAR_VALID 0x80
++#define E1000_GPIE_NSICR 0x00000001
++#define E1000_GPIE_MSIX_MODE 0x00000010
++#define E1000_GPIE_EIAME 0x40000000
++#define E1000_GPIE_PBA 0x80000000
+
+ /* Receive Descriptor bit definitions */
+-#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+-#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+-#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+-#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+-#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+-#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+-#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */
+-
+-#define E1000_RXDEXT_STATERR_LB 0x00040000
+-#define E1000_RXDEXT_STATERR_CE 0x01000000
+-#define E1000_RXDEXT_STATERR_SE 0x02000000
+-#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+-#define E1000_RXDEXT_STATERR_CXE 0x10000000
+-#define E1000_RXDEXT_STATERR_TCPE 0x20000000
+-#define E1000_RXDEXT_STATERR_IPE 0x40000000
+-#define E1000_RXDEXT_STATERR_RXE 0x80000000
++#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
++#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
++#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
++#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
++#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
++#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
++#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
++#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
++#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
++#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
++#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
++#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
++#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
++#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
++#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
++#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
++#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
++#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
++#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
++
++#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
++#define E1000_RXDEXT_STATERR_LB 0x00040000
++#define E1000_RXDEXT_STATERR_CE 0x01000000
++#define E1000_RXDEXT_STATERR_SE 0x02000000
++#define E1000_RXDEXT_STATERR_SEQ 0x04000000
++#define E1000_RXDEXT_STATERR_CXE 0x10000000
++#define E1000_RXDEXT_STATERR_TCPE 0x20000000
++#define E1000_RXDEXT_STATERR_IPE 0x40000000
++#define E1000_RXDEXT_STATERR_RXE 0x80000000
++
++/* mask to determine if packets should be dropped due to frame errors */
++#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
++ E1000_RXD_ERR_CE | \
++ E1000_RXD_ERR_SE | \
++ E1000_RXD_ERR_SEQ | \
++ E1000_RXD_ERR_CXE | \
++ E1000_RXD_ERR_RXE)
+
+ /* Same mask, but for extended and packet split descriptors */
+ #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+- E1000_RXDEXT_STATERR_CE | \
+- E1000_RXDEXT_STATERR_SE | \
+- E1000_RXDEXT_STATERR_SEQ | \
+- E1000_RXDEXT_STATERR_CXE | \
++ E1000_RXDEXT_STATERR_CE | \
++ E1000_RXDEXT_STATERR_SE | \
++ E1000_RXDEXT_STATERR_SEQ | \
++ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
+
+-#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+-#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
+-#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+-#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
+-#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
++#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
++#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
++#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
++#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
++#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
++#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+
++#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
+
+ /* Management Control */
+-#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+-#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+-#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */
+-/* Enable Neighbor Discovery Filtering */
+-#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+-#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
++#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
++#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
++#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
++#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
++#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+ /* Enable MAC address filtering */
+-#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
++#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
++/* Enable MNG packets to host memory */
++#define E1000_MANC_EN_MNG2HOST 0x00200000
++
++#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
++#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
++#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
++#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
+
+ /* Receive Control */
+-#define E1000_RCTL_EN 0x00000002 /* enable */
+-#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+-#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
+-#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
+-#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+-#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+-#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+-#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
+-#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+-#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+-#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
+-#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
+-#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+-#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+-#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */
+-#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+-#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
++#define E1000_RCTL_RST 0x00000001 /* Software reset */
++#define E1000_RCTL_EN 0x00000002 /* enable */
++#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
++#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */
++#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */
++#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
++#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
++#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
++#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
++#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
++#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
++#define E1000_RCTL_RDMTS_HEX 0x00010000
++#define E1000_RCTL_RDMTS1_HEX E1000_RCTL_RDMTS_HEX
++#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
++#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
++#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
++/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
++#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
++#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
++#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
++#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
++/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
++#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
++#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
++#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
++#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
++#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
++#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
++#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
++#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
++#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
++#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+
+ /* Use byte values for the following shift parameters
+ * Usage:
+ * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+- * E1000_PSRCTL_BSIZE0_MASK) |
+- * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+- * E1000_PSRCTL_BSIZE1_MASK) |
+- * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+- * E1000_PSRCTL_BSIZE2_MASK) |
+- * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |;
+- * E1000_PSRCTL_BSIZE3_MASK))
++ * E1000_PSRCTL_BSIZE0_MASK) |
++ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
++ * E1000_PSRCTL_BSIZE1_MASK) |
++ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
++ * E1000_PSRCTL_BSIZE2_MASK) |
++ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |;
++ * E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256], default=256
+ * value1 = [1024..64512], default=4096
+ * value2 = [0..64512], default=4096
+ * value3 = [0..64512], default=0
+ */
+
+-#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
+-#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
+-#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
+-#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
+-
+-#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+-#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+-#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+-#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
++#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
++#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
++#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
++#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
++
++#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
++#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
++#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
++#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
+
+ /* SWFW_SYNC Definitions */
+-#define E1000_SWFW_EEP_SM 0x1
+-#define E1000_SWFW_PHY0_SM 0x2
+-#define E1000_SWFW_PHY1_SM 0x4
+-#define E1000_SWFW_PHY2_SM 0x20
+-#define E1000_SWFW_PHY3_SM 0x40
++#define E1000_SWFW_EEP_SM 0x01
++#define E1000_SWFW_PHY0_SM 0x02
++#define E1000_SWFW_PHY1_SM 0x04
++#define E1000_SWFW_CSR_SM 0x08
++#define E1000_SWFW_PHY2_SM 0x20
++#define E1000_SWFW_PHY3_SM 0x40
++#define E1000_SWFW_SW_MNG_SM 0x400
+
+-/* FACTPS Definitions */
+ /* Device Control */
+-#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+-#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
+-#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+-#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+-#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+-#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+-#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+-#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+-#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+-#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+-#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+-/* Defined polarity of Dock/Undock indication in SDP[0] */
+-/* Reset both PHY ports, through PHYRST_N pin */
+-/* enable link status from external LINK_0 and LINK_1 pins */
+-#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+-#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+-#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
+-#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
+-#define E1000_CTRL_RST 0x04000000 /* Global reset */
+-#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+-#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+-#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+-#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+-/* Initiate an interrupt to manageability engine */
+-#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
+-
+-/* Bit definitions for the Management Data IO (MDIO) and Management Data
+- * Clock (MDC) pins in the Device Control Register.
+- */
++#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
++#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
++#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */
++#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
++#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
++#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
++#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
++#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
++#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
++#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
++#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
++#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
++#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
++#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
++#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
++#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
++#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
++#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
++#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
++#define E1000_CTRL_RST 0x04000000 /* Global reset */
++#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
++#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
++#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
++#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
++#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
+
+-#define E1000_CONNSW_ENRGSRC 0x4
++#define E1000_CONNSW_ENRGSRC 0x4
+ #define E1000_CONNSW_PHYSD 0x400
+ #define E1000_CONNSW_PHY_PDN 0x800
+ #define E1000_CONNSW_SERDESD 0x200
+ #define E1000_CONNSW_AUTOSENSE_CONF 0x2
+ #define E1000_CONNSW_AUTOSENSE_EN 0x1
+-#define E1000_PCS_CFG_PCS_EN 8
+-#define E1000_PCS_LCTL_FLV_LINK_UP 1
+-#define E1000_PCS_LCTL_FSV_100 2
+-#define E1000_PCS_LCTL_FSV_1000 4
+-#define E1000_PCS_LCTL_FDV_FULL 8
+-#define E1000_PCS_LCTL_FSD 0x10
+-#define E1000_PCS_LCTL_FORCE_LINK 0x20
+-#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+-#define E1000_PCS_LCTL_AN_ENABLE 0x10000
+-#define E1000_PCS_LCTL_AN_RESTART 0x20000
+-#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
+-#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
+-
+-#define E1000_PCS_LSTS_LINK_OK 1
+-#define E1000_PCS_LSTS_SPEED_100 2
+-#define E1000_PCS_LSTS_SPEED_1000 4
+-#define E1000_PCS_LSTS_DUPLEX_FULL 8
+-#define E1000_PCS_LSTS_SYNK_OK 0x10
++#define E1000_PCS_CFG_PCS_EN 8
++#define E1000_PCS_LCTL_FLV_LINK_UP 1
++#define E1000_PCS_LCTL_FSV_10 0
++#define E1000_PCS_LCTL_FSV_100 2
++#define E1000_PCS_LCTL_FSV_1000 4
++#define E1000_PCS_LCTL_FDV_FULL 8
++#define E1000_PCS_LCTL_FSD 0x10
++#define E1000_PCS_LCTL_FORCE_LINK 0x20
++#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
++#define E1000_PCS_LCTL_AN_ENABLE 0x10000
++#define E1000_PCS_LCTL_AN_RESTART 0x20000
++#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
++#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
++
++#define E1000_PCS_LSTS_LINK_OK 1
++#define E1000_PCS_LSTS_SPEED_100 2
++#define E1000_PCS_LSTS_SPEED_1000 4
++#define E1000_PCS_LSTS_DUPLEX_FULL 8
++#define E1000_PCS_LSTS_SYNK_OK 0x10
++#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
+
+ /* Device Status */
+-#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
+-#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+-#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+-#define E1000_STATUS_FUNC_SHIFT 2
+-#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+-#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+-#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+-#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+-/* Change in Dock/Undock state. Clear on write '0'. */
+-/* Status of Master requests. */
+-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000
+-/* BMC external code execution disabled */
+-
++#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */
++#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
++#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
++#define E1000_STATUS_FUNC_SHIFT 2
++#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
++#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
++#define E1000_STATUS_SPEED_MASK 0x000000C0
++#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
++#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
++#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
++#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */
++#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
++#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
+ #define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */
+ #define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */
+-/* Constants used to intrepret the masked PCI-X bus speed. */
+
+-#define SPEED_10 10
+-#define SPEED_100 100
+-#define SPEED_1000 1000
+-#define SPEED_2500 2500
+-#define HALF_DUPLEX 1
+-#define FULL_DUPLEX 2
+-
+-
+-#define ADVERTISE_10_HALF 0x0001
+-#define ADVERTISE_10_FULL 0x0002
+-#define ADVERTISE_100_HALF 0x0004
+-#define ADVERTISE_100_FULL 0x0008
+-#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+-#define ADVERTISE_1000_FULL 0x0020
++#define SPEED_10 10
++#define SPEED_100 100
++#define SPEED_1000 1000
++#define SPEED_2500 2500
++#define HALF_DUPLEX 1
++#define FULL_DUPLEX 2
++
++#define ADVERTISE_10_HALF 0x0001
++#define ADVERTISE_10_FULL 0x0002
++#define ADVERTISE_100_HALF 0x0004
++#define ADVERTISE_100_FULL 0x0008
++#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
++#define ADVERTISE_1000_FULL 0x0020
+
+ /* 1000/H is not supported, nor spec-compliant. */
+-#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
+- ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
+- ADVERTISE_1000_FULL)
+-#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
+- ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+-#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+-#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
+-#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \
+- ADVERTISE_1000_FULL)
+-#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
++#define E1000_ALL_SPEED_DUPLEX ( \
++ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
++ ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
++#define E1000_ALL_NOT_GIG ( \
++ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
++ ADVERTISE_100_FULL)
++#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
++#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
++#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
+
+-#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
++#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
+
+ /* LED Control */
+-#define E1000_LEDCTL_LED0_MODE_SHIFT 0
+-#define E1000_LEDCTL_LED0_BLINK 0x00000080
+ #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
++#define E1000_LEDCTL_LED0_MODE_SHIFT 0
+ #define E1000_LEDCTL_LED0_IVRT 0x00000040
++#define E1000_LEDCTL_LED0_BLINK 0x00000080
+
+-#define E1000_LEDCTL_MODE_LED_ON 0xE
+-#define E1000_LEDCTL_MODE_LED_OFF 0xF
++#define E1000_LEDCTL_MODE_LED_ON 0xE
++#define E1000_LEDCTL_MODE_LED_OFF 0xF
+
+ /* Transmit Descriptor bit definitions */
+-#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+-#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+-#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+-#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+-#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+-#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+-#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+-/* Extended desc bits for Linksec and timesync */
++#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
++#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
++#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
++#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
++#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
++#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
++#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
++#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
++#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
++#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
++#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
++#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
++#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
++#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
++#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
++#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
++#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
++#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
++#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
++#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
++#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+
+ /* Transmit Control */
+-#define E1000_TCTL_EN 0x00000002 /* enable tx */
+-#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+-#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+-#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+-#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+-
+-/* DMA Coalescing register fields */
+-#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */
+-#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */
+-#define E1000_DMACR_DMACTHR_SHIFT 16
+-#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */
+-#define E1000_DMACR_DMAC_LX_SHIFT 28
+-#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
+-/* DMA Coalescing BMC-to-OS Watchdog Enable */
+-#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
+-
+-#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */
+-
+-#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
+-
+-#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */
+-#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */
++#define E1000_TCTL_EN 0x00000002 /* enable Tx */
++#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
++#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
++#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
++#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
++#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
+
+-#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */
+-
+-#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */
+-#define E1000_FCRTC_RTH_COAL_SHIFT 4
+-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
+-
+-/* Timestamp in Rx buffer */
+-#define E1000_RXPBS_CFG_TS_EN 0x80000000
+-
+-#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
+-#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
++/* Transmit Arbitration Count */
++#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */
+
+ /* SerDes Control */
+-#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
++#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
++#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
+
+ /* Receive Checksum Control */
+-#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
+-#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
+-#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
+-#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
++#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
++#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
++#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
++#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
++#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
+ /* Header split receive */
+-#define E1000_RFCTL_LEF 0x00040000
++#define E1000_RFCTL_NFSW_DIS 0x00000040
++#define E1000_RFCTL_NFSR_DIS 0x00000080
++#define E1000_RFCTL_ACK_DIS 0x00001000
++#define E1000_RFCTL_EXTEN 0x00008000
++#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
++#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
++#define E1000_RFCTL_LEF 0x00040000
+
+ /* Collision related configuration parameters */
+-#define E1000_COLLISION_THRESHOLD 15
+-#define E1000_CT_SHIFT 4
+-#define E1000_COLLISION_DISTANCE 63
+-#define E1000_COLD_SHIFT 12
++#define E1000_COLLISION_THRESHOLD 15
++#define E1000_CT_SHIFT 4
++#define E1000_COLLISION_DISTANCE 63
++#define E1000_COLD_SHIFT 12
++
++/* Default values for the transmit IPG register */
++#define DEFAULT_82543_TIPG_IPGT_FIBER 9
++#define DEFAULT_82543_TIPG_IPGT_COPPER 8
++
++#define E1000_TIPG_IPGT_MASK 0x000003FF
++
++#define DEFAULT_82543_TIPG_IPGR1 8
++#define E1000_TIPG_IPGR1_SHIFT 10
++
++#define DEFAULT_82543_TIPG_IPGR2 6
++#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
++#define E1000_TIPG_IPGR2_SHIFT 20
+
+ /* Ethertype field values */
+-#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
++#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
++
++#define ETHERNET_FCS_SIZE 4
++#define MAX_JUMBO_FRAME_SIZE 0x3F00
++/* The datasheet maximum supported RX size is 9.5KB (9728 bytes) */
++#define MAX_RX_JUMBO_FRAME_SIZE 0x2600
++#define E1000_TX_PTR_GAP 0x1F
++
++/* Extended Configuration Control and Size */
++#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
++#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
++#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
++#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
++#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
++#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
++#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
++#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
++#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
++
++#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
++#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
++#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
++#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
+
+-#define MAX_JUMBO_FRAME_SIZE 0x3F00
++#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+
+ /* PBA constants */
+-#define E1000_PBA_34K 0x0022
+-#define E1000_PBA_64K 0x0040 /* 64KB */
++#define E1000_PBA_8K 0x0008 /* 8KB */
++#define E1000_PBA_10K 0x000A /* 10KB */
++#define E1000_PBA_12K 0x000C /* 12KB */
++#define E1000_PBA_14K 0x000E /* 14KB */
++#define E1000_PBA_16K 0x0010 /* 16KB */
++#define E1000_PBA_18K 0x0012
++#define E1000_PBA_20K 0x0014
++#define E1000_PBA_22K 0x0016
++#define E1000_PBA_24K 0x0018
++#define E1000_PBA_26K 0x001A
++#define E1000_PBA_30K 0x001E
++#define E1000_PBA_32K 0x0020
++#define E1000_PBA_34K 0x0022
++#define E1000_PBA_35K 0x0023
++#define E1000_PBA_38K 0x0026
++#define E1000_PBA_40K 0x0028
++#define E1000_PBA_48K 0x0030 /* 48KB */
++#define E1000_PBA_64K 0x0040 /* 64KB */
++
++#define E1000_PBA_RXA_MASK 0xFFFF
++
++#define E1000_PBS_16K E1000_PBA_16K
++
++/* Uncorrectable/correctable ECC Error counts and enable bits */
++#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF
++#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
++#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
++#define E1000_PBECCSTS_ECC_ENABLE 0x00010000
++
++#define IFS_MAX 80
++#define IFS_MIN 40
++#define IFS_RATIO 4
++#define IFS_STEP 10
++#define MIN_NUM_XMITS 1000
+
+ /* SW Semaphore Register */
+-#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+-#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
++#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
++#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
++#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
++
++#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
+
+ /* Interrupt Cause Read */
+-#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+-#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+-#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
+-#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
+-#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
+-#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
+-#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
+-#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
++#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
++#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
++#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
++#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
++#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
++#define E1000_ICR_RXO 0x00000040 /* Rx overrun */
++#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
++#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
++#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */
++#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
++#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
++#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
++#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
++#define E1000_ICR_TXD_LOW 0x00008000
++#define E1000_ICR_MNG 0x00040000 /* Manageability event */
++#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
++#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
++#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
+ /* If this bit asserted, the driver should claim the interrupt */
+-#define E1000_ICR_INT_ASSERTED 0x80000000
+-/* LAN connected device generates an interrupt */
+-#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
++#define E1000_ICR_INT_ASSERTED 0x80000000
++#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
++#define E1000_ICR_FER 0x00400000 /* Fatal Error */
++
++#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/
++#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */
+
+ /* Extended Interrupt Cause Read */
+-#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
+-#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
+-#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
+-#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
+-#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
+-#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
+-#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
+-#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
+-#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
++#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
++#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
++#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
++#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
++#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
++#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
++#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
++#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
++#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
++#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+ /* TCP Timer */
++#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */
++#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */
++#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */
++#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */
+
+ /* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+@@ -404,194 +546,207 @@
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+- E1000_IMS_LSC | \
+- E1000_IMS_DOUTSYNC)
++ E1000_IMS_LSC)
+
+ /* Interrupt Mask Set */
+-#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+-#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+-#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
+-#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
+-#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+-#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+-#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+-#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
+-#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
++#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */
++#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
++#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
++#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
++#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
++#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
++#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */
++#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
++#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
++#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
++#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
++#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
++#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
++#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */
+
++#define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/
++#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */
+ /* Extended Interrupt Mask Set */
+-#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
++#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
++#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
++#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
++#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
++#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
++#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
++#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
++#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
++#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
++#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
+
+ /* Interrupt Cause Set */
+-#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+-#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+-#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */
++#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
++#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
++#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
+
+ /* Extended Interrupt Cause Set */
+-/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
+-#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
++#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
++#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
++#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
++#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
++#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
++#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
++#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
++#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
++#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
++#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
+
++#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
++/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
++#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
++#define E1000_EITR_INTERVAL 0x00007FFC
+
+ /* Transmit Descriptor Control */
++#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
++#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
++#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
++#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
++#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
++#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
+ /* Enable the counting of descriptors still to be processed. */
++#define E1000_TXDCTL_COUNT_DESC 0x00400000
+
+ /* Flow Control Constants */
+-#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+-#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+-#define FLOW_CONTROL_TYPE 0x8808
+-
+-/* Transmit Config Word */
+-#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
+-#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
++#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
++#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
++#define FLOW_CONTROL_TYPE 0x8808
+
+ /* 802.1q VLAN Packet Size */
+-#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
+-#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
++#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
++#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+
+-/* Receive Address */
+-/* Number of high/low register pairs in the RAR. The RAR (Receive Address
++/* Receive Address
++ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots. However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+-#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
+-#define E1000_RAL_MAC_ADDR_LEN 4
+-#define E1000_RAH_MAC_ADDR_LEN 2
+-#define E1000_RAH_POOL_MASK 0x03FC0000
+-#define E1000_RAH_POOL_1 0x00040000
++#define E1000_RAR_ENTRIES 15
++#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
++#define E1000_RAL_MAC_ADDR_LEN 4
++#define E1000_RAH_MAC_ADDR_LEN 2
++#define E1000_RAH_QUEUE_MASK_82575 0x000C0000
++#define E1000_RAH_POOL_1 0x00040000
+
+ /* Error Codes */
+-#define E1000_ERR_NVM 1
+-#define E1000_ERR_PHY 2
+-#define E1000_ERR_CONFIG 3
+-#define E1000_ERR_PARAM 4
+-#define E1000_ERR_MAC_INIT 5
+-#define E1000_ERR_RESET 9
+-#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+-#define E1000_BLK_PHY_RESET 12
+-#define E1000_ERR_SWFW_SYNC 13
+-#define E1000_NOT_IMPLEMENTED 14
+-#define E1000_ERR_MBX 15
+-#define E1000_ERR_INVALID_ARGUMENT 16
+-#define E1000_ERR_NO_SPACE 17
+-#define E1000_ERR_NVM_PBA_SECTION 18
+-#define E1000_ERR_INVM_VALUE_NOT_FOUND 19
+-#define E1000_ERR_I2C 20
++#define E1000_SUCCESS 0
++#define E1000_ERR_NVM 1
++#define E1000_ERR_PHY 2
++#define E1000_ERR_CONFIG 3
++#define E1000_ERR_PARAM 4
++#define E1000_ERR_MAC_INIT 5
++#define E1000_ERR_PHY_TYPE 6
++#define E1000_ERR_RESET 9
++#define E1000_ERR_MASTER_REQUESTS_PENDING 10
++#define E1000_ERR_HOST_INTERFACE_COMMAND 11
++#define E1000_BLK_PHY_RESET 12
++#define E1000_ERR_SWFW_SYNC 13
++#define E1000_NOT_IMPLEMENTED 14
++#define E1000_ERR_MBX 15
++#define E1000_ERR_INVALID_ARGUMENT 16
++#define E1000_ERR_NO_SPACE 17
++#define E1000_ERR_NVM_PBA_SECTION 18
++#define E1000_ERR_I2C 19
++#define E1000_ERR_INVM_VALUE_NOT_FOUND 20
+
+ /* Loop limit on how long we wait for auto-negotiation to complete */
+-#define COPPER_LINK_UP_LIMIT 10
+-#define PHY_AUTO_NEG_LIMIT 45
+-#define PHY_FORCE_LIMIT 20
++#define FIBER_LINK_UP_LIMIT 50
++#define COPPER_LINK_UP_LIMIT 10
++#define PHY_AUTO_NEG_LIMIT 45
++#define PHY_FORCE_LIMIT 20
+ /* Number of 100 microseconds we wait for PCI Express master disable */
+-#define MASTER_DISABLE_TIMEOUT 800
++#define MASTER_DISABLE_TIMEOUT 800
+ /* Number of milliseconds we wait for PHY configuration done after MAC reset */
+-#define PHY_CFG_TIMEOUT 100
++#define PHY_CFG_TIMEOUT 100
+ /* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
++#define MDIO_OWNERSHIP_TIMEOUT 10
+ /* Number of milliseconds for NVM auto read done after MAC reset. */
+-#define AUTO_READ_DONE_TIMEOUT 10
++#define AUTO_READ_DONE_TIMEOUT 10
+
+ /* Flow Control */
+-#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
++#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
++#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
++#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+
+-#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */
+-#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */
++/* Transmit Configuration Word */
++#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
++#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
++#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
++#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
++#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
+
+-#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */
+-#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */
+-#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
+-#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
+-#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+-#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
+-#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+-#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */
+-
+-#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
+-#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
+-#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
+-#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
+-#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+-#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+-
+-#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
+-#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
+-#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
+-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
+-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
+-#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
+-#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
+-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
+-#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
+-#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
+-#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
+-
+-#define E1000_TIMINCA_16NS_SHIFT 24
+-
+-/* Time Sync Interrupt Cause/Mask Register Bits */
+-
+-#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */
+-#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */
+-#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */
+-#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */
+-#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */
+-#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */
+-#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */
+-#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */
+-
+-#define TSYNC_INTERRUPTS TSINTR_TXTS
+-#define E1000_TSICR_TXTS TSINTR_TXTS
+-
+-/* TSAUXC Configuration Bits */
+-#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */
+-#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */
+-#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */
+-#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */
+-#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */
+-#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */
+-#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */
+-#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */
+-#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */
+-#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */
+-#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */
+-#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */
+-#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */
+-#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */
+-
+-/* SDP Configuration Bits */
+-#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */
+-#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */
+-#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */
+-#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */
+-#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */
+-#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */
+-#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */
+-#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */
+-#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */
+-#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */
+-#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */
+-#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */
+-#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */
+-#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */
+-#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */
+-#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */
+-#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */
+-#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */
+-#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */
+-#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */
+-#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */
+-#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */
+-#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */
+-#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */
+-#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */
+-#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */
+-#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */
+-#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */
+-#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */
+-#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */
+-
+-#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
+-#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
+-#define E1000_MDICNFG_PHY_MASK 0x03E00000
+-#define E1000_MDICNFG_PHY_SHIFT 21
++/* Receive Configuration Word */
++#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
++#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
++#define E1000_RXCW_C 0x20000000 /* Receive config */
++#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
++
++#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
++#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
++
++#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
++#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
++#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
++#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
++#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
++#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
++#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
++#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
++#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
++
++#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
++#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
++#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
++#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
++#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
++#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
++
++#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
++#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
++#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
++#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
++#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
++#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
++#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
++#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
++#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
++#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
++#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
++
++#define E1000_TIMINCA_16NS_SHIFT 24
++#define E1000_TIMINCA_INCPERIOD_SHIFT 24
++#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF
++
++#define E1000_TSICR_TXTS 0x00000002
++#define E1000_TSIM_TXTS 0x00000002
++/* TUPLE Filtering Configuration */
++#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */
++#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */
++#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */
++/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL SHIFT */
++#define E1000_TTQF_PROTOCOL_TCP 0x0
++/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
++#define E1000_TTQF_PROTOCOL_UDP 0x1
++/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
++#define E1000_TTQF_PROTOCOL_SCTP 0x2
++#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */
++#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shfit */
++#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */
++#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */
++#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */
++#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */
++#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */
++#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */
++
++#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
++#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
++#define E1000_MDICNFG_PHY_MASK 0x03E00000
++#define E1000_MDICNFG_PHY_SHIFT 21
+
+ #define E1000_MEDIA_PORT_COPPER 1
+ #define E1000_MEDIA_PORT_OTHER 2
+@@ -604,95 +759,209 @@
+ #define E1000_M88E1112_PAGE_ADDR 0x16
+ #define E1000_M88E1112_STATUS 0x01
+
++#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */
++#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */
++#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */
++#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
++#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */
++
++/* I350 EEE defines */
++#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */
++#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */
++#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */
++#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */
++#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */
++/* EEE status */
++#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
++#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */
++#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */
++#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */
++#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */
++#define E1000_M88E1543_EEE_CTRL_1 0x0
++#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
++#define E1000_M88E1543_FIBER_CTRL 0x0 /* Fiber Control Register */
++#define E1000_EEE_ADV_DEV_I354 7
++#define E1000_EEE_ADV_ADDR_I354 60
++#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
++#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
++#define E1000_PCS_STATUS_DEV_I354 3
++#define E1000_PCS_STATUS_ADDR_I354 1
++#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400
++#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800
++#define E1000_M88E1512_CFG_REG_1 0x0010
++#define E1000_M88E1512_CFG_REG_2 0x0011
++#define E1000_M88E1512_CFG_REG_3 0x0007
++#define E1000_M88E1512_MODE 0x0014
++#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */
++#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
++#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
+ /* PCI Express Control */
+-#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
+-#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
+-#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
+-#define E1000_GCR_CAP_VER2 0x00040000
+-
+-/* mPHY Address Control and Data Registers */
+-#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */
+-#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
+-#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */
+-
+-/* mPHY PCS CLK Register */
+-#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */
+-/* mPHY Near End Digital Loopback Override Bit */
+-#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
+-
+-#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+-#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
++#define E1000_GCR_RXD_NO_SNOOP 0x00000001
++#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
++#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
++#define E1000_GCR_TXD_NO_SNOOP 0x00000008
++#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
++#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
++#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
++#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
++#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
++#define E1000_GCR_CAP_VER2 0x00040000
++
++#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
++ E1000_GCR_RXDSCW_NO_SNOOP | \
++ E1000_GCR_RXDSCR_NO_SNOOP | \
++ E1000_GCR_TXD_NO_SNOOP | \
++ E1000_GCR_TXDSCW_NO_SNOOP | \
++ E1000_GCR_TXDSCR_NO_SNOOP)
++
++#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
++
++/* mPHY address control and data registers */
++#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */
++#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
++#define E1000_MPHY_DATA 0x0E10 /* Data Register */
++
++/* AFE CSR Offset for PCS CLK */
++#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004
++/* Override for near end digital loopback. */
++#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
+
+ /* PHY Control Register */
+-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+-#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+-#define MII_CR_SPEED_1000 0x0040
+-#define MII_CR_SPEED_100 0x2000
+-#define MII_CR_SPEED_10 0x0000
++#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
++#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
++#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
++#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
++#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
++#define MII_CR_POWER_DOWN 0x0800 /* Power down */
++#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
++#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
++#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
++#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
++#define MII_CR_SPEED_1000 0x0040
++#define MII_CR_SPEED_100 0x2000
++#define MII_CR_SPEED_10 0x0000
+
+ /* PHY Status Register */
+-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
++#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
++#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
++#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
++#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
++#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
++#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
++#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
++#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
++#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
++#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
++#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
++#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
++#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
++#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
++#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+ /* Autoneg Advertisement Register */
+-#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+-#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+-#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+-#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+-#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+-#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
++#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
++#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
++#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
++#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
++#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
++#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
++#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
++#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
++#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
++#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+ /* Link Partner Ability Register (Base Page) */
+-#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+-#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
++#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
++#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */
++#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */
++#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */
++#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */
++#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
++#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
++#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */
++#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */
++#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */
++#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+ /* Autoneg Expansion Register */
++#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
++#define NWAY_ER_PAGE_RXD 0x0002 /* LP 10T Half Dplx Capable */
++#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP 10T Full Dplx Capable */
++#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP 100TX Half Dplx Capable */
++#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP 100TX Full Dplx Capable */
+
+ /* 1000BASE-T Control Register */
+-#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+-#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+-#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
+- /* 0=Configure PHY as Slave */
+-#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
+- /* 0=Automatic Master/Slave config */
++#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
++#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
++#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
++/* 1=Repeater/switch device port 0=DTE device */
++#define CR_1000T_REPEATER_DTE 0x0400
++/* 1=Configure PHY as Master 0=Configure PHY as Slave */
++#define CR_1000T_MS_VALUE 0x0800
++/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
++#define CR_1000T_MS_ENABLE 0x1000
++#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
++#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
++#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
++#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
++#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
+
+ /* 1000BASE-T Status Register */
+-#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+-#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
++#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */
++#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */
++#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
++#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
++#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
++#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
++#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */
++#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
+
++#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+
+ /* PHY 1000 MII Register/Bit Definitions */
+ /* PHY Registers defined by IEEE */
+-#define PHY_CONTROL 0x00 /* Control Register */
+-#define PHY_STATUS 0x01 /* Status Register */
+-#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+-#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+-#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+-#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+-#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+-#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
++#define PHY_CONTROL 0x00 /* Control Register */
++#define PHY_STATUS 0x01 /* Status Register */
++#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
++#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
++#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
++#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
++#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
++#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
++#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
++#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
++#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
++#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
++
++#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
+
+ /* NVM Control */
+-#define E1000_EECD_SK 0x00000001 /* NVM Clock */
+-#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
+-#define E1000_EECD_DI 0x00000004 /* NVM Data In */
+-#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
+-#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
+-#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
+-#define E1000_EECD_PRES 0x00000100 /* NVM Present */
++#define E1000_EECD_SK 0x00000001 /* NVM Clock */
++#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
++#define E1000_EECD_DI 0x00000004 /* NVM Data In */
++#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
++#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
++#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
++#define E1000_EECD_PRES 0x00000100 /* NVM Present */
++#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
++#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */
++#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */
++#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */
++#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */
+ /* NVM Addressing bits based on type 0=small, 1=large */
+-#define E1000_EECD_ADDR_BITS 0x00000400
+-#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
+-#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
+-#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
+-#define E1000_EECD_SIZE_EX_SHIFT 11
++#define E1000_EECD_ADDR_BITS 0x00000400
++#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
++#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
++#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
++#define E1000_EECD_SIZE_EX_SHIFT 11
++#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
++#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */
++#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
++#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
+ #define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
+-#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
++#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */
+ #define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */
++#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */
+ #define E1000_FLUDONE_ATTEMPTS 20000
+ #define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
+ #define E1000_I210_FIFO_SEL_RX 0x00
+@@ -700,53 +969,32 @@
+ #define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
+ #define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
+ #define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
++
+ #define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */
+ /* Secure FLASH mode requires removing MSb */
+ #define E1000_I210_FW_PTR_MASK 0x7FFF
+ /* Firmware code revision field word offset*/
+ #define E1000_I210_FW_VER_OFFSET 328
+-#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
+-#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
+-#define E1000_FLUDONE_ATTEMPTS 20000
+-#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
+-#define E1000_I210_FIFO_SEL_RX 0x00
+-#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i))
+-#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
+-#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
+-#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
+-
+
+-/* Offset to data in NVM read/write registers */
+-#define E1000_NVM_RW_REG_DATA 16
+-#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+-#define E1000_NVM_RW_REG_START 1 /* Start operation */
+-#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+-#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
++#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */
++#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
++#define E1000_NVM_RW_REG_START 1 /* Start operation */
++#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
++#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
++#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
++#define E1000_FLASH_UPDATES 2000
+
+ /* NVM Word Offsets */
+-#define NVM_COMPAT 0x0003
+-#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */
+-#define NVM_VERSION 0x0005
+-#define NVM_INIT_CONTROL2_REG 0x000F
+-#define NVM_INIT_CONTROL3_PORT_B 0x0014
+-#define NVM_INIT_CONTROL3_PORT_A 0x0024
+-#define NVM_ALT_MAC_ADDR_PTR 0x0037
+-#define NVM_CHECKSUM_REG 0x003F
+-#define NVM_COMPATIBILITY_REG_3 0x0003
+-#define NVM_COMPATIBILITY_BIT_MASK 0x8000
+-#define NVM_MAC_ADDR 0x0000
+-#define NVM_SUB_DEV_ID 0x000B
+-#define NVM_SUB_VEN_ID 0x000C
+-#define NVM_DEV_ID 0x000D
+-#define NVM_VEN_ID 0x000E
+-#define NVM_INIT_CTRL_2 0x000F
+-#define NVM_INIT_CTRL_4 0x0013
+-#define NVM_LED_1_CFG 0x001C
+-#define NVM_LED_0_2_CFG 0x001F
+-#define NVM_ETRACK_WORD 0x0042
+-#define NVM_ETRACK_HIWORD 0x0043
+-#define NVM_COMB_VER_OFF 0x0083
+-#define NVM_COMB_VER_PTR 0x003d
++#define NVM_COMPAT 0x0003
++#define NVM_ID_LED_SETTINGS 0x0004
++#define NVM_VERSION 0x0005
++#define E1000_I210_NVM_FW_MODULE_PTR 0x0010
++#define E1000_I350_NVM_FW_MODULE_PTR 0x0051
++#define NVM_FUTURE_INIT_WORD1 0x0019
++#define NVM_ETRACK_WORD 0x0042
++#define NVM_ETRACK_HIWORD 0x0043
++#define NVM_COMB_VER_OFF 0x0083
++#define NVM_COMB_VER_PTR 0x003d
+
+ /* NVM version defines */
+ #define NVM_MAJOR_MASK 0xF000
+@@ -763,6 +1011,31 @@
+ #define NVM_HEX_CONV 16
+ #define NVM_HEX_TENS 10
+
++/* FW version defines */
++/* Offset of "Loader patch ptr" in Firmware Header */
++#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01
++/* Patch generation hour & minutes */
++#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04
++/* Patch generation month & day */
++#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05
++/* Patch generation year */
++#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06
++/* Patch major & minor numbers */
++#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07
++
++#define NVM_MAC_ADDR 0x0000
++#define NVM_SUB_DEV_ID 0x000B
++#define NVM_SUB_VEN_ID 0x000C
++#define NVM_DEV_ID 0x000D
++#define NVM_VEN_ID 0x000E
++#define NVM_INIT_CTRL_2 0x000F
++#define NVM_INIT_CTRL_4 0x0013
++#define NVM_LED_1_CFG 0x001C
++#define NVM_LED_0_2_CFG 0x001F
++
++#define NVM_COMPAT_VALID_CSUM 0x0001
++#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040
++
+ #define NVM_ETS_CFG 0x003E
+ #define NVM_ETS_LTHRES_DELTA_MASK 0x07C0
+ #define NVM_ETS_LTHRES_DELTA_SHIFT 6
+@@ -775,236 +1048,292 @@
+ #define NVM_ETS_DATA_INDEX_MASK 0x0300
+ #define NVM_ETS_DATA_INDEX_SHIFT 8
+ #define NVM_ETS_DATA_HTHRESH_MASK 0x00FF
++#define NVM_INIT_CONTROL2_REG 0x000F
++#define NVM_INIT_CONTROL3_PORT_B 0x0014
++#define NVM_INIT_3GIO_3 0x001A
++#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
++#define NVM_INIT_CONTROL3_PORT_A 0x0024
++#define NVM_CFG 0x0012
++#define NVM_ALT_MAC_ADDR_PTR 0x0037
++#define NVM_CHECKSUM_REG 0x003F
++#define NVM_COMPATIBILITY_REG_3 0x0003
++#define NVM_COMPATIBILITY_BIT_MASK 0x8000
++
++#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
++#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
++#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */
++#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */
+
+-#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
+-#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
+-#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */
+-#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */
+-
+-#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0)
++#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0)
+
+ /* Mask bits for fields in Word 0x24 of the NVM */
+-#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */
+-#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */
++#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */
++#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */
++/* Offset of Link Mode bits for 82575/82576 */
++#define NVM_WORD24_LNK_MODE_OFFSET 8
++/* Offset of Link Mode bits for 82580 up */
++#define NVM_WORD24_82580_LNK_MODE_OFFSET 4
+
+ /* Mask bits for fields in Word 0x0f of the NVM */
+-#define NVM_WORD0F_PAUSE_MASK 0x3000
+-#define NVM_WORD0F_ASM_DIR 0x2000
++#define NVM_WORD0F_PAUSE_MASK 0x3000
++#define NVM_WORD0F_PAUSE 0x1000
++#define NVM_WORD0F_ASM_DIR 0x2000
+
+ /* Mask bits for fields in Word 0x1a of the NVM */
++#define NVM_WORD1A_ASPM_MASK 0x000C
+
+-/* length of string needed to store part num */
+-#define E1000_PBANUM_LENGTH 11
++/* Mask bits for fields in Word 0x03 of the EEPROM */
++#define NVM_COMPAT_LOM 0x0800
++
++/* length of string needed to store PBA number */
++#define E1000_PBANUM_LENGTH 11
+
+ /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+-#define NVM_SUM 0xBABA
++#define NVM_SUM 0xBABA
+
+-#define NVM_PBA_OFFSET_0 8
+-#define NVM_PBA_OFFSET_1 9
++/* PBA (printed board assembly) number words */
++#define NVM_PBA_OFFSET_0 8
++#define NVM_PBA_OFFSET_1 9
++#define NVM_PBA_PTR_GUARD 0xFAFA
+ #define NVM_RESERVED_WORD 0xFFFF
+-#define NVM_PBA_PTR_GUARD 0xFAFA
+-#define NVM_WORD_SIZE_BASE_SHIFT 6
+-
+-/* NVM Commands - Microwire */
++#define NVM_WORD_SIZE_BASE_SHIFT 6
+
+ /* NVM Commands - SPI */
+-#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+-#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
+-#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
+-#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
+-#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
+-#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
++#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
++#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
++#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
++#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
++#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
++#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
+
+ /* SPI NVM Status Register */
+-#define NVM_STATUS_RDY_SPI 0x01
++#define NVM_STATUS_RDY_SPI 0x01
+
+ /* Word definitions for ID LED Settings */
+-#define ID_LED_RESERVED_0000 0x0000
+-#define ID_LED_RESERVED_FFFF 0xFFFF
+-#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
+- (ID_LED_OFF1_OFF2 << 8) | \
+- (ID_LED_DEF1_DEF2 << 4) | \
+- (ID_LED_DEF1_DEF2))
+-#define ID_LED_DEF1_DEF2 0x1
+-#define ID_LED_DEF1_ON2 0x2
+-#define ID_LED_DEF1_OFF2 0x3
+-#define ID_LED_ON1_DEF2 0x4
+-#define ID_LED_ON1_ON2 0x5
+-#define ID_LED_ON1_OFF2 0x6
+-#define ID_LED_OFF1_DEF2 0x7
+-#define ID_LED_OFF1_ON2 0x8
+-#define ID_LED_OFF1_OFF2 0x9
+-
+-#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
+-#define IGP_ACTIVITY_LED_ENABLE 0x0300
+-#define IGP_LED3_MODE 0x07000000
++#define ID_LED_RESERVED_0000 0x0000
++#define ID_LED_RESERVED_FFFF 0xFFFF
++#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
++ (ID_LED_OFF1_OFF2 << 8) | \
++ (ID_LED_DEF1_DEF2 << 4) | \
++ (ID_LED_DEF1_DEF2))
++#define ID_LED_DEF1_DEF2 0x1
++#define ID_LED_DEF1_ON2 0x2
++#define ID_LED_DEF1_OFF2 0x3
++#define ID_LED_ON1_DEF2 0x4
++#define ID_LED_ON1_ON2 0x5
++#define ID_LED_ON1_OFF2 0x6
++#define ID_LED_OFF1_DEF2 0x7
++#define ID_LED_OFF1_ON2 0x8
++#define ID_LED_OFF1_OFF2 0x9
++
++#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
++#define IGP_ACTIVITY_LED_ENABLE 0x0300
++#define IGP_LED3_MODE 0x07000000
+
+ /* PCI/PCI-X/PCI-EX Config space */
+-#define PCIE_DEVICE_CONTROL2 0x28
+-#define PCIE_DEVICE_CONTROL2_16ms 0x0005
++#define PCIX_COMMAND_REGISTER 0xE6
++#define PCIX_STATUS_REGISTER_LO 0xE8
++#define PCIX_STATUS_REGISTER_HI 0xEA
++#define PCI_HEADER_TYPE_REGISTER 0x0E
++#define PCIE_LINK_STATUS 0x12
++#define PCIE_DEVICE_CONTROL2 0x28
++
++#define PCIX_COMMAND_MMRBC_MASK 0x000C
++#define PCIX_COMMAND_MMRBC_SHIFT 0x2
++#define PCIX_STATUS_HI_MMRBC_MASK 0x0060
++#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5
++#define PCIX_STATUS_HI_MMRBC_4K 0x3
++#define PCIX_STATUS_HI_MMRBC_2K 0x2
++#define PCIX_STATUS_LO_FUNC_MASK 0x7
++#define PCI_HEADER_TYPE_MULTIFUNC 0x80
++#define PCIE_LINK_WIDTH_MASK 0x3F0
++#define PCIE_LINK_WIDTH_SHIFT 4
++#define PCIE_LINK_SPEED_MASK 0x0F
++#define PCIE_LINK_SPEED_2500 0x01
++#define PCIE_LINK_SPEED_5000 0x02
++#define PCIE_DEVICE_CONTROL2_16ms 0x0005
+
+-#define PHY_REVISION_MASK 0xFFFFFFF0
+-#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+-#define MAX_PHY_MULTI_PAGE_REG 0xF
++#ifndef ETH_ADDR_LEN
++#define ETH_ADDR_LEN 6
++#endif
++
++#define PHY_REVISION_MASK 0xFFFFFFF0
++#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
++#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+-/* Bit definitions for valid PHY IDs. */
+-/* I = Integrated
++/* Bit definitions for valid PHY IDs.
++ * I = Integrated
+ * E = External
+ */
+-#define M88E1111_I_PHY_ID 0x01410CC0
+-#define M88E1112_E_PHY_ID 0x01410C90
+-#define I347AT4_E_PHY_ID 0x01410DC0
+-#define IGP03E1000_E_PHY_ID 0x02A80390
+-#define I82580_I_PHY_ID 0x015403A0
+-#define I350_I_PHY_ID 0x015403B0
+-#define M88_VENDOR 0x0141
+-#define I210_I_PHY_ID 0x01410C00
+-#define M88E1543_E_PHY_ID 0x01410EA0
++#define M88E1000_E_PHY_ID 0x01410C50
++#define M88E1000_I_PHY_ID 0x01410C30
++#define M88E1011_I_PHY_ID 0x01410C20
++#define IGP01E1000_I_PHY_ID 0x02A80380
++#define M88E1111_I_PHY_ID 0x01410CC0
++#define M88E1543_E_PHY_ID 0x01410EA0
++#define M88E1512_E_PHY_ID 0x01410DD0
++#define M88E1112_E_PHY_ID 0x01410C90
++#define I347AT4_E_PHY_ID 0x01410DC0
++#define M88E1340M_E_PHY_ID 0x01410DF0
++#define GG82563_E_PHY_ID 0x01410CA0
++#define IGP03E1000_E_PHY_ID 0x02A80390
++#define IFE_E_PHY_ID 0x02A80330
++#define IFE_PLUS_E_PHY_ID 0x02A80320
++#define IFE_C_E_PHY_ID 0x02A80310
++#define I82580_I_PHY_ID 0x015403A0
++#define I350_I_PHY_ID 0x015403B0
++#define I210_I_PHY_ID 0x01410C00
++#define IGP04E1000_E_PHY_ID 0x02A80391
++#define M88_VENDOR 0x0141
+
+ /* M88E1000 Specific Registers */
+-#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
+-#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
+-#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
++#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */
++#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */
++#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */
++#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
+
+-#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
+-#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
++#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */
++#define M88E1000_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */
+
+ /* M88E1000 PHY Specific Control Register */
+-#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+-/* 1=CLK125 low, 0=CLK125 toggling */
+-#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
+- /* Manual MDI configuration */
+-#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
++#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
++/* MDI Crossover Mode bits 6:5 Manual MDI configuration */
++#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000
++#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+ /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+-#define M88E1000_PSCR_AUTO_X_1000T 0x0040
++#define M88E1000_PSCR_AUTO_X_1000T 0x0040
+ /* Auto crossover enabled all speeds */
+-#define M88E1000_PSCR_AUTO_X_MODE 0x0060
+-/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold
+- * 0=Normal 10BASE-T Rx Threshold
+- */
+-/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
+-#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
++#define M88E1000_PSCR_AUTO_X_MODE 0x0060
++#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */
+
+ /* M88E1000 PHY Specific Status Register */
+-#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
+-#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
+-#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
++#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
++#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
++#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
+ /* 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+-#define M88E1000_PSSR_CABLE_LENGTH 0x0380
+-#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+-#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+-
+-#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+-
+-/* M88E1000 Extended PHY Specific Control Register */
+-/* 1 = Lost lock detect enabled.
+- * Will assert lost lock and bring
+- * link down if idle not seen
+- * within 1ms in 1000BASE-T
+- */
++#define M88E1000_PSSR_CABLE_LENGTH 0x0380
++#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
++#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
++#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
++#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
++
++#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
++
+ /* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
++#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
++#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
+ /* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
+-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
+-#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
++#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
++#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
++#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
++
++/* Intel I347AT4 Registers */
++#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
++#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
++#define I347AT4_PAGE_SELECT 0x16
+
+-/* Intel i347-AT4 Registers */
++/* I347AT4 Extended PHY Specific Control Register */
+
+-#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
+-#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
+-#define I347AT4_PAGE_SELECT 0x16
+-
+-/* i347-AT4 Extended PHY Specific Control Register */
+-
+-/* Number of times we will attempt to autonegotiate before downshifting if we
+- * are the master
++/* Number of times we will attempt to autonegotiate before downshifting if we
++ * are the master
+ */
+-#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
+-#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000
+-#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000
+-#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000
+-#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000
+-#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000
+-#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000
+-#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000
+-#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000
+-#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000
++#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
++#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000
++#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000
++#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000
++#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000
++#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000
++#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000
++#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000
++#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000
++#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000
+
+-/* i347-AT4 PHY Cable Diagnostics Control */
+-#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
++/* I347AT4 PHY Cable Diagnostics Control */
++#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
+
+-/* Marvell 1112 only registers */
+-#define M88E1112_VCT_DSP_DISTANCE 0x001A
++/* M88E1112 only registers */
++#define M88E1112_VCT_DSP_DISTANCE 0x001A
+
+ /* M88EC018 Rev 2 specific DownShift settings */
+-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
+-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
++#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
++
++/* Bits...
++ * 15-5: page
++ * 4-0: register offset
++ */
++#define GG82563_PAGE_SHIFT 5
++#define GG82563_REG(page, reg) \
++ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
++#define GG82563_MIN_ALT_REG 30
++
++/* GG82563 Specific Registers */
++#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */
++#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */
++#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */
++#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */
++
++/* MAC Specific Control Register */
++#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21)
++
++#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */
++
++/* Page 193 - Port Control Registers */
++/* Kumeran Mode Control */
++#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16)
++#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */
++
++/* Page 194 - KMRN Registers */
++#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */
+
+ /* MDI Control */
+-#define E1000_MDIC_DATA_MASK 0x0000FFFF
+-#define E1000_MDIC_REG_MASK 0x001F0000
+-#define E1000_MDIC_REG_SHIFT 16
+-#define E1000_MDIC_PHY_MASK 0x03E00000
+-#define E1000_MDIC_PHY_SHIFT 21
+-#define E1000_MDIC_OP_WRITE 0x04000000
+-#define E1000_MDIC_OP_READ 0x08000000
+-#define E1000_MDIC_READY 0x10000000
+-#define E1000_MDIC_INT_EN 0x20000000
+-#define E1000_MDIC_ERROR 0x40000000
+-#define E1000_MDIC_DEST 0x80000000
+-
+-/* Thermal Sensor */
+-#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
+-#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */
+-
+-/* Energy Efficient Ethernet */
+-#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */
+-#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */
+-#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */
+-#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */
+-#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */
+-#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
+-#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */
+-#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
+-#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */
+-#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
+-#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
+-#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
+-#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */
+-#define E1000_M88E1543_EEE_CTRL_1 0x0
+-#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
+-#define E1000_EEE_ADV_DEV_I354 7
+-#define E1000_EEE_ADV_ADDR_I354 60
+-#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
+-#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
+-#define E1000_PCS_STATUS_DEV_I354 3
+-#define E1000_PCS_STATUS_ADDR_I354 1
+-#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */
+-#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400
+-#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800
++#define E1000_MDIC_REG_MASK 0x001F0000
++#define E1000_MDIC_REG_SHIFT 16
++#define E1000_MDIC_PHY_MASK 0x03E00000
++#define E1000_MDIC_PHY_SHIFT 21
++#define E1000_MDIC_OP_WRITE 0x04000000
++#define E1000_MDIC_OP_READ 0x08000000
++#define E1000_MDIC_READY 0x10000000
++#define E1000_MDIC_ERROR 0x40000000
++#define E1000_MDIC_DEST 0x80000000
+
+ /* SerDes Control */
+-#define E1000_GEN_CTL_READY 0x80000000
+-#define E1000_GEN_CTL_ADDRESS_SHIFT 8
+-#define E1000_GEN_POLL_TIMEOUT 640
+-
+-#define E1000_VFTA_ENTRY_SHIFT 5
+-#define E1000_VFTA_ENTRY_MASK 0x7F
+-#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+-
+-/* DMA Coalescing register fields */
+-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */
++#define E1000_GEN_CTL_READY 0x80000000
++#define E1000_GEN_CTL_ADDRESS_SHIFT 8
++#define E1000_GEN_POLL_TIMEOUT 640
++
++/* LinkSec register fields */
++#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000
++#define E1000_LSECTXCAP_SUM_SHIFT 16
++#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000
++#define E1000_LSECRXCAP_SUM_SHIFT 16
++
++#define E1000_LSECTXCTRL_EN_MASK 0x00000003
++#define E1000_LSECTXCTRL_DISABLE 0x0
++#define E1000_LSECTXCTRL_AUTH 0x1
++#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2
++#define E1000_LSECTXCTRL_AISCI 0x00000020
++#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
++#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8
++
++#define E1000_LSECRXCTRL_EN_MASK 0x0000000C
++#define E1000_LSECRXCTRL_EN_SHIFT 2
++#define E1000_LSECRXCTRL_DISABLE 0x0
++#define E1000_LSECRXCTRL_CHECK 0x1
++#define E1000_LSECRXCTRL_STRICT 0x2
++#define E1000_LSECRXCTRL_DROP 0x3
++#define E1000_LSECRXCTRL_PLSH 0x00000040
++#define E1000_LSECRXCTRL_RP 0x00000080
++#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33
+
+ /* Tx Rate-Scheduler Config fields */
+ #define E1000_RTTBCNRC_RS_ENA 0x80000000
+@@ -1013,4 +1342,70 @@
+ #define E1000_RTTBCNRC_RF_INT_MASK \
+ (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+
+-#endif
++/* DMA Coalescing register fields */
++/* DMA Coalescing Watchdog Timer */
++#define E1000_DMACR_DMACWT_MASK 0x00003FFF
++/* DMA Coalescing Rx Threshold */
++#define E1000_DMACR_DMACTHR_MASK 0x00FF0000
++#define E1000_DMACR_DMACTHR_SHIFT 16
++/* Lx when no PCIe transactions */
++#define E1000_DMACR_DMAC_LX_MASK 0x30000000
++#define E1000_DMACR_DMAC_LX_SHIFT 28
++#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
++/* DMA Coalescing BMC-to-OS Watchdog Enable */
++#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
++
++/* DMA Coalescing Transmit Threshold */
++#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF
++
++#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
++
++/* Rx Traffic Rate Threshold */
++#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF
++/* Rx packet rate in current window */
++#define E1000_DMCRTRH_LRPRCW 0x80000000
++
++/* DMA Coal Rx Traffic Current Count */
++#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF
++
++/* Flow ctrl Rx Threshold High val */
++#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0
++#define E1000_FCRTC_RTH_COAL_SHIFT 4
++/* Lx power decision based on DMA coal */
++#define E1000_PCIEMISC_LX_DECISION 0x00000080
++
++#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
++#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */
++#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */
++#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
++#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
++
++/* Proxy Filter Control */
++#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */
++#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */
++#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */
++#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */
++#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */
++#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */
++#define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */
++#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
++#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */
++/* Proxy Status */
++#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */
++
++/* Firmware Status */
++#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */
++/* VF Control */
++#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */
++
++#define E1000_STATUS_LAN_ID_MASK 0x00000000C /* Mask for Lan ID field */
++/* Lan ID bit field offset in status register */
++#define E1000_STATUS_LAN_ID_OFFSET 2
++#define E1000_VFTA_ENTRIES 128
++#ifndef E1000_UNUSEDARG
++#define E1000_UNUSEDARG
++#endif /* E1000_UNUSEDARG */
++#ifndef ERROR_REPORT
++#define ERROR_REPORT(fmt) do { } while (0)
++#endif /* ERROR_REPORT */
++#endif /* _E1000_DEFINES_H_ */
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
+--- a/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_hw.h 2016-11-14 14:32:08.579567168 +0000
+@@ -1,33 +1,31 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
+
+ #ifndef _E1000_HW_H_
+ #define _E1000_HW_H_
+
+-#include
+-#include
+-#include
+-#include
+-
++#include "e1000_osdep.h"
+ #include "e1000_regs.h"
+ #include "e1000_defines.h"
+
+@@ -50,15 +48,14 @@
+ #define E1000_DEV_ID_82580_SGMII 0x1511
+ #define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+ #define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
+-#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
+-#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
+-#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
+-#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
+ #define E1000_DEV_ID_I350_COPPER 0x1521
+ #define E1000_DEV_ID_I350_FIBER 0x1522
+ #define E1000_DEV_ID_I350_SERDES 0x1523
+ #define E1000_DEV_ID_I350_SGMII 0x1524
++#define E1000_DEV_ID_I350_DA4 0x1546
+ #define E1000_DEV_ID_I210_COPPER 0x1533
++#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
++#define E1000_DEV_ID_I210_COPPER_IT 0x1535
+ #define E1000_DEV_ID_I210_FIBER 0x1536
+ #define E1000_DEV_ID_I210_SERDES 0x1537
+ #define E1000_DEV_ID_I210_SGMII 0x1538
+@@ -68,19 +65,26 @@
+ #define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
+ #define E1000_DEV_ID_I354_SGMII 0x1F41
+ #define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45
++#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
++#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
++#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
++#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
+
+-#define E1000_REVISION_2 2
+-#define E1000_REVISION_4 4
+-
+-#define E1000_FUNC_0 0
+-#define E1000_FUNC_1 1
+-#define E1000_FUNC_2 2
+-#define E1000_FUNC_3 3
+-
+-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
+-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
++#define E1000_REVISION_0 0
++#define E1000_REVISION_1 1
++#define E1000_REVISION_2 2
++#define E1000_REVISION_3 3
++#define E1000_REVISION_4 4
++
++#define E1000_FUNC_0 0
++#define E1000_FUNC_1 1
++#define E1000_FUNC_2 2
++#define E1000_FUNC_3 3
++
++#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
++#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
++#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
++#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
+
+ enum e1000_mac_type {
+ e1000_undefined = 0,
+@@ -127,6 +131,7 @@
+ e1000_phy_igp_3,
+ e1000_phy_ife,
+ e1000_phy_82580,
++ e1000_phy_vf,
+ e1000_phy_i210,
+ };
+
+@@ -181,6 +186,177 @@
+ e1000_fc_default = 0xFF
+ };
+
++enum e1000_ms_type {
++ e1000_ms_hw_default = 0,
++ e1000_ms_force_master,
++ e1000_ms_force_slave,
++ e1000_ms_auto
++};
++
++enum e1000_smart_speed {
++ e1000_smart_speed_default = 0,
++ e1000_smart_speed_on,
++ e1000_smart_speed_off
++};
++
++enum e1000_serdes_link_state {
++ e1000_serdes_link_down = 0,
++ e1000_serdes_link_autoneg_progress,
++ e1000_serdes_link_autoneg_complete,
++ e1000_serdes_link_forced_up
++};
++
++#ifndef __le16
++#define __le16 u16
++#endif
++#ifndef __le32
++#define __le32 u32
++#endif
++#ifndef __le64
++#define __le64 u64
++#endif
++/* Receive Descriptor */
++struct e1000_rx_desc {
++ __le64 buffer_addr; /* Address of the descriptor's data buffer */
++ __le16 length; /* Length of data DMAed into data buffer */
++ __le16 csum; /* Packet checksum */
++ u8 status; /* Descriptor status */
++ u8 errors; /* Descriptor Errors */
++ __le16 special;
++};
++
++/* Receive Descriptor - Extended */
++union e1000_rx_desc_extended {
++ struct {
++ __le64 buffer_addr;
++ __le64 reserved;
++ } read;
++ struct {
++ struct {
++ __le32 mrq; /* Multiple Rx Queues */
++ union {
++ __le32 rss; /* RSS Hash */
++ struct {
++ __le16 ip_id; /* IP id */
++ __le16 csum; /* Packet Checksum */
++ } csum_ip;
++ } hi_dword;
++ } lower;
++ struct {
++ __le32 status_error; /* ext status/error */
++ __le16 length;
++ __le16 vlan; /* VLAN tag */
++ } upper;
++ } wb; /* writeback */
++};
++
++#define MAX_PS_BUFFERS 4
++
++/* Number of packet split data buffers (not including the header buffer) */
++#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
++
++/* Receive Descriptor - Packet Split */
++union e1000_rx_desc_packet_split {
++ struct {
++ /* one buffer for protocol header(s), three data buffers */
++ __le64 buffer_addr[MAX_PS_BUFFERS];
++ } read;
++ struct {
++ struct {
++ __le32 mrq; /* Multiple Rx Queues */
++ union {
++ __le32 rss; /* RSS Hash */
++ struct {
++ __le16 ip_id; /* IP id */
++ __le16 csum; /* Packet Checksum */
++ } csum_ip;
++ } hi_dword;
++ } lower;
++ struct {
++ __le32 status_error; /* ext status/error */
++ __le16 length0; /* length of buffer 0 */
++ __le16 vlan; /* VLAN tag */
++ } middle;
++ struct {
++ __le16 header_status;
++ /* length of buffers 1-3 */
++ __le16 length[PS_PAGE_BUFFERS];
++ } upper;
++ __le64 reserved;
++ } wb; /* writeback */
++};
++
++/* Transmit Descriptor */
++struct e1000_tx_desc {
++ __le64 buffer_addr; /* Address of the descriptor's data buffer */
++ union {
++ __le32 data;
++ struct {
++ __le16 length; /* Data buffer length */
++ u8 cso; /* Checksum offset */
++ u8 cmd; /* Descriptor control */
++ } flags;
++ } lower;
++ union {
++ __le32 data;
++ struct {
++ u8 status; /* Descriptor status */
++ u8 css; /* Checksum start */
++ __le16 special;
++ } fields;
++ } upper;
++};
++
++/* Offload Context Descriptor */
++struct e1000_context_desc {
++ union {
++ __le32 ip_config;
++ struct {
++ u8 ipcss; /* IP checksum start */
++ u8 ipcso; /* IP checksum offset */
++ __le16 ipcse; /* IP checksum end */
++ } ip_fields;
++ } lower_setup;
++ union {
++ __le32 tcp_config;
++ struct {
++ u8 tucss; /* TCP checksum start */
++ u8 tucso; /* TCP checksum offset */
++ __le16 tucse; /* TCP checksum end */
++ } tcp_fields;
++ } upper_setup;
++ __le32 cmd_and_length;
++ union {
++ __le32 data;
++ struct {
++ u8 status; /* Descriptor status */
++ u8 hdr_len; /* Header length */
++ __le16 mss; /* Maximum segment size */
++ } fields;
++ } tcp_seg_setup;
++};
++
++/* Offload data descriptor */
++struct e1000_data_desc {
++ __le64 buffer_addr; /* Address of the descriptor's buffer address */
++ union {
++ __le32 data;
++ struct {
++ __le16 length; /* Data buffer length */
++ u8 typ_len_ext;
++ u8 cmd;
++ } flags;
++ } lower;
++ union {
++ __le32 data;
++ struct {
++ u8 status; /* Descriptor status */
++ u8 popts; /* Packet Options */
++ __le16 special;
++ } fields;
++ } upper;
++};
++
+ /* Statistics counters collected by the MAC */
+ struct e1000_hw_stats {
+ u64 crcerrs;
+@@ -289,7 +465,7 @@
+ u8 checksum;
+ };
+
+-#define E1000_HI_MAX_DATA_LENGTH 252
++#define E1000_HI_MAX_DATA_LENGTH 252
+ struct e1000_host_command_info {
+ struct e1000_host_command_header command_header;
+ u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+@@ -304,7 +480,7 @@
+ u16 command_length;
+ };
+
+-#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
++#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+ struct e1000_host_mng_command_info {
+ struct e1000_host_mng_command_header command_header;
+ u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+@@ -313,52 +489,95 @@
+ #include "e1000_mac.h"
+ #include "e1000_phy.h"
+ #include "e1000_nvm.h"
++#include "e1000_manage.h"
+ #include "e1000_mbx.h"
+
++/* Function pointers for the MAC. */
+ struct e1000_mac_operations {
+- s32 (*check_for_link)(struct e1000_hw *);
+- s32 (*reset_hw)(struct e1000_hw *);
+- s32 (*init_hw)(struct e1000_hw *);
++ s32 (*init_params)(struct e1000_hw *);
++ s32 (*id_led_init)(struct e1000_hw *);
++ s32 (*blink_led)(struct e1000_hw *);
+ bool (*check_mng_mode)(struct e1000_hw *);
+- s32 (*setup_physical_interface)(struct e1000_hw *);
+- void (*rar_set)(struct e1000_hw *, u8 *, u32);
+- s32 (*read_mac_addr)(struct e1000_hw *);
+- s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
+- s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
+- void (*release_swfw_sync)(struct e1000_hw *, u16);
+-#ifdef CONFIG_IGB_HWMON
++ s32 (*check_for_link)(struct e1000_hw *);
++ s32 (*cleanup_led)(struct e1000_hw *);
++ void (*clear_hw_cntrs)(struct e1000_hw *);
++ void (*clear_vfta)(struct e1000_hw *);
++ s32 (*get_bus_info)(struct e1000_hw *);
++ void (*set_lan_id)(struct e1000_hw *);
++ s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
++ s32 (*led_on)(struct e1000_hw *);
++ s32 (*led_off)(struct e1000_hw *);
++ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
++ s32 (*reset_hw)(struct e1000_hw *);
++ s32 (*init_hw)(struct e1000_hw *);
++ void (*shutdown_serdes)(struct e1000_hw *);
++ void (*power_up_serdes)(struct e1000_hw *);
++ s32 (*setup_link)(struct e1000_hw *);
++ s32 (*setup_physical_interface)(struct e1000_hw *);
++ s32 (*setup_led)(struct e1000_hw *);
++ void (*write_vfta)(struct e1000_hw *, u32, u32);
++ void (*config_collision_dist)(struct e1000_hw *);
++ int (*rar_set)(struct e1000_hw *, u8*, u32);
++ s32 (*read_mac_addr)(struct e1000_hw *);
++ s32 (*validate_mdi_setting)(struct e1000_hw *);
+ s32 (*get_thermal_sensor_data)(struct e1000_hw *);
+ s32 (*init_thermal_sensor_thresh)(struct e1000_hw *);
+-#endif
+-
++ s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
++ void (*release_swfw_sync)(struct e1000_hw *, u16);
+ };
+
++/* When to use various PHY register access functions:
++ *
++ * Func Caller
++ * Function Does Does When to use
++ * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * X_reg L,P,A n/a for simple PHY reg accesses
++ * X_reg_locked P,A L for multiple accesses of different regs
++ * on different pages
++ * X_reg_page A L,P for multiple accesses of different regs
++ * on the same page
++ *
++ * Where X=[read|write], L=locking, P=sets page, A=register access
++ *
++ */
+ struct e1000_phy_operations {
+- s32 (*acquire)(struct e1000_hw *);
+- s32 (*check_polarity)(struct e1000_hw *);
+- s32 (*check_reset_block)(struct e1000_hw *);
+- s32 (*force_speed_duplex)(struct e1000_hw *);
+- s32 (*get_cfg_done)(struct e1000_hw *hw);
+- s32 (*get_cable_length)(struct e1000_hw *);
+- s32 (*get_phy_info)(struct e1000_hw *);
+- s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
++ s32 (*init_params)(struct e1000_hw *);
++ s32 (*acquire)(struct e1000_hw *);
++ s32 (*check_polarity)(struct e1000_hw *);
++ s32 (*check_reset_block)(struct e1000_hw *);
++ s32 (*commit)(struct e1000_hw *);
++ s32 (*force_speed_duplex)(struct e1000_hw *);
++ s32 (*get_cfg_done)(struct e1000_hw *hw);
++ s32 (*get_cable_length)(struct e1000_hw *);
++ s32 (*get_info)(struct e1000_hw *);
++ s32 (*set_page)(struct e1000_hw *, u16);
++ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
++ s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
++ s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *);
+ void (*release)(struct e1000_hw *);
+- s32 (*reset)(struct e1000_hw *);
+- s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+- s32 (*write_reg)(struct e1000_hw *, u32, u16);
++ s32 (*reset)(struct e1000_hw *);
++ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
++ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
++ s32 (*write_reg)(struct e1000_hw *, u32, u16);
++ s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
++ s32 (*write_reg_page)(struct e1000_hw *, u32, u16);
++ void (*power_up)(struct e1000_hw *);
++ void (*power_down)(struct e1000_hw *);
+ s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
+ s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
+ };
+
++/* Function pointers for the NVM. */
+ struct e1000_nvm_operations {
+- s32 (*acquire)(struct e1000_hw *);
+- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
++ s32 (*init_params)(struct e1000_hw *);
++ s32 (*acquire)(struct e1000_hw *);
++ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+ void (*release)(struct e1000_hw *);
+- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+- s32 (*update)(struct e1000_hw *);
+- s32 (*validate)(struct e1000_hw *);
+- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
++ void (*reload)(struct e1000_hw *);
++ s32 (*update)(struct e1000_hw *);
++ s32 (*valid_led_default)(struct e1000_hw *, u16 *);
++ s32 (*validate)(struct e1000_hw *);
++ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+ };
+
+ #define E1000_MAX_SENSORS 3
+@@ -374,49 +593,45 @@
+ struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS];
+ };
+
+-struct e1000_info {
+- s32 (*get_invariants)(struct e1000_hw *);
+- struct e1000_mac_operations *mac_ops;
+- struct e1000_phy_operations *phy_ops;
+- struct e1000_nvm_operations *nvm_ops;
+-};
+-
+-extern const struct e1000_info e1000_82575_info;
+-
+ struct e1000_mac_info {
+ struct e1000_mac_operations ops;
+-
+- u8 addr[6];
+- u8 perm_addr[6];
++ u8 addr[ETH_ADDR_LEN];
++ u8 perm_addr[ETH_ADDR_LEN];
+
+ enum e1000_mac_type type;
+
++ u32 collision_delta;
+ u32 ledctl_default;
+ u32 ledctl_mode1;
+ u32 ledctl_mode2;
+ u32 mc_filter_type;
++ u32 tx_packet_delta;
+ u32 txcw;
+
++ u16 current_ifs_val;
++ u16 ifs_max_val;
++ u16 ifs_min_val;
++ u16 ifs_ratio;
++ u16 ifs_step_size;
+ u16 mta_reg_count;
+ u16 uta_reg_count;
+
+ /* Maximum size of the MTA register table in all supported adapters */
+- #define MAX_MTA_REG 128
++#define MAX_MTA_REG 128
+ u32 mta_shadow[MAX_MTA_REG];
+ u16 rar_entry_count;
+
+ u8 forced_speed_duplex;
+
+ bool adaptive_ifs;
++ bool has_fwsm;
+ bool arc_subsystem_valid;
+ bool asf_firmware_present;
+ bool autoneg;
+ bool autoneg_failed;
+- bool disable_hw_init_bits;
+ bool get_link_status;
+- bool ifs_params_forced;
+ bool in_ifs_mode;
+- bool report_tx_early;
++ enum e1000_serdes_link_state serdes_link_state;
+ bool serdes_has_link;
+ bool tx_pkt_filtering;
+ struct e1000_thermal_sensor_data thermal_sensor_data;
+@@ -424,7 +639,6 @@
+
+ struct e1000_phy_info {
+ struct e1000_phy_operations ops;
+-
+ enum e1000_phy_type type;
+
+ enum e1000_1000t_rx_status local_rx;
+@@ -477,20 +691,19 @@
+ enum e1000_bus_speed speed;
+ enum e1000_bus_width width;
+
+- u32 snoop;
+-
+ u16 func;
+ u16 pci_cmd_word;
+ };
+
+ struct e1000_fc_info {
+- u32 high_water; /* Flow control high-water mark */
+- u32 low_water; /* Flow control low-water mark */
+- u16 pause_time; /* Flow control pause timer */
+- bool send_xon; /* Flow control send XON */
+- bool strict_ieee; /* Strict IEEE mode */
+- enum e1000_fc_mode current_mode; /* Type of flow control */
+- enum e1000_fc_mode requested_mode;
++ u32 high_water; /* Flow control high-water mark */
++ u32 low_water; /* Flow control low-water mark */
++ u16 pause_time; /* Flow control pause timer */
++ u16 refresh_time; /* Flow control refresh timer */
++ bool send_xon; /* Flow control send XON */
++ bool strict_ieee; /* Strict IEEE mode */
++ enum e1000_fc_mode current_mode; /* FC mode in effect */
++ enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
+ };
+
+ struct e1000_mbx_operations {
+@@ -525,12 +738,17 @@
+ bool sgmii_active;
+ bool global_device_reset;
+ bool eee_disable;
+- bool clear_semaphore_once;
+- struct e1000_sfp_flags eth_flags;
+ bool module_plugged;
++ bool clear_semaphore_once;
++ u32 mtu;
++ struct sfp_e1000_flags eth_flags;
+ u8 media_port;
+ bool media_changed;
+- bool mas_capable;
++};
++
++struct e1000_dev_spec_vf {
++ u32 vf_number;
++ u32 v2p_mailbox;
+ };
+
+ struct e1000_hw {
+@@ -549,7 +767,8 @@
+ struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+ union {
+- struct e1000_dev_spec_82575 _82575;
++ struct e1000_dev_spec_82575 _82575;
++ struct e1000_dev_spec_vf vf;
+ } dev_spec;
+
+ u16 device_id;
+@@ -560,14 +779,13 @@
+ u8 revision_id;
+ };
+
+-struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
+-#define hw_dbg(format, arg...) \
+- netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
++#include "e1000_82575.h"
++#include "e1000_i210.h"
+
+ /* These functions must be implemented by drivers */
+-s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+-s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
++s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
++s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
++void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
++void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+-void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+-void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+-#endif /* _E1000_HW_H_ */
++#endif
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
+--- a/drivers/net/ethernet/intel/igb/e1000_i210.c 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_i210.c 2016-11-14 14:32:08.579567168 +0000
+@@ -1,107 +1,40 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
+
+-/* e1000_i210
+- * e1000_i211
+- */
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
+
+-#include
+-#include
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
+
+-#include "e1000_hw.h"
+-#include "e1000_i210.h"
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
+
+-static s32 igb_update_flash_i210(struct e1000_hw *hw);
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
+
+-/**
+- * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
+- * @hw: pointer to the HW structure
+- *
+- * Acquire the HW semaphore to access the PHY or NVM
+- */
+-static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
+-{
+- u32 swsm;
+- s32 timeout = hw->nvm.word_size + 1;
+- s32 i = 0;
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+- /* Get the SW semaphore */
+- while (i < timeout) {
+- swsm = rd32(E1000_SWSM);
+- if (!(swsm & E1000_SWSM_SMBI))
+- break;
++*******************************************************************************/
+
+- udelay(50);
+- i++;
+- }
++#include "e1000_api.h"
+
+- if (i == timeout) {
+- /* In rare circumstances, the SW semaphore may already be held
+- * unintentionally. Clear the semaphore once before giving up.
+- */
+- if (hw->dev_spec._82575.clear_semaphore_once) {
+- hw->dev_spec._82575.clear_semaphore_once = false;
+- igb_put_hw_semaphore(hw);
+- for (i = 0; i < timeout; i++) {
+- swsm = rd32(E1000_SWSM);
+- if (!(swsm & E1000_SWSM_SMBI))
+- break;
+
+- udelay(50);
+- }
+- }
+-
+- /* If we do not have the semaphore here, we have to give up. */
+- if (i == timeout) {
+- hw_dbg("Driver can't access device - SMBI bit is set.\n");
+- return -E1000_ERR_NVM;
+- }
+- }
+-
+- /* Get the FW semaphore. */
+- for (i = 0; i < timeout; i++) {
+- swsm = rd32(E1000_SWSM);
+- wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+-
+- /* Semaphore acquired if bit latched */
+- if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
+- break;
+-
+- udelay(50);
+- }
+-
+- if (i == timeout) {
+- /* Release semaphores */
+- igb_put_hw_semaphore(hw);
+- hw_dbg("Driver can't access the NVM\n");
+- return -E1000_ERR_NVM;
+- }
+-
+- return 0;
+-}
++static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
++static void e1000_release_nvm_i210(struct e1000_hw *hw);
++static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
++static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
++ u16 *data);
++static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
++static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+
+ /**
+- * igb_acquire_nvm_i210 - Request for access to EEPROM
++ * e1000_acquire_nvm_i210 - Request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+@@ -109,93 +42,178 @@
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+-static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
++static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
+ {
+- return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_acquire_nvm_i210");
++
++ ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
++
++ return ret_val;
+ }
+
+ /**
+- * igb_release_nvm_i210 - Release exclusive access to EEPROM
++ * e1000_release_nvm_i210 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ **/
+-static void igb_release_nvm_i210(struct e1000_hw *hw)
++static void e1000_release_nvm_i210(struct e1000_hw *hw)
+ {
+- igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
++ DEBUGFUNC("e1000_release_nvm_i210");
++
++ e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+ }
+
+ /**
+- * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
++ * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+-s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
++s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+ {
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+- s32 ret_val = 0;
++ s32 ret_val = E1000_SUCCESS;
+ s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
++ DEBUGFUNC("e1000_acquire_swfw_sync_i210");
++
+ while (i < timeout) {
+- if (igb_get_hw_semaphore_i210(hw)) {
++ if (e1000_get_hw_semaphore_i210(hw)) {
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+- swfw_sync = rd32(E1000_SW_FW_SYNC);
++ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+- /* Firmware currently using resource (fwmask) */
+- igb_put_hw_semaphore(hw);
+- mdelay(5);
++ /*
++ * Firmware currently using resource (fwmask)
++ * or other software thread using resource (swmask)
++ */
++ e1000_put_hw_semaphore_generic(hw);
++ msec_delay_irq(5);
+ i++;
+ }
+
+ if (i == timeout) {
+- hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
++ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+- wr32(E1000_SW_FW_SYNC, swfw_sync);
++ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
++
++ e1000_put_hw_semaphore_generic(hw);
+
+- igb_put_hw_semaphore(hw);
+ out:
+ return ret_val;
+ }
+
+ /**
+- * igb_release_swfw_sync_i210 - Release SW/FW semaphore
++ * e1000_release_swfw_sync_i210 - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+-void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
++void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+ {
+ u32 swfw_sync;
+
+- while (igb_get_hw_semaphore_i210(hw))
++ DEBUGFUNC("e1000_release_swfw_sync_i210");
++
++ while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
+ ; /* Empty */
+
+- swfw_sync = rd32(E1000_SW_FW_SYNC);
++ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+- wr32(E1000_SW_FW_SYNC, swfw_sync);
++ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+- igb_put_hw_semaphore(hw);
++ e1000_put_hw_semaphore_generic(hw);
+ }
+
+ /**
+- * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
++ * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
++ * @hw: pointer to the HW structure
++ *
++ * Acquire the HW semaphore to access the PHY or NVM
++ **/
++static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
++{
++ u32 swsm;
++ s32 timeout = hw->nvm.word_size + 1;
++ s32 i = 0;
++
++ DEBUGFUNC("e1000_get_hw_semaphore_i210");
++
++ /* Get the SW semaphore */
++ while (i < timeout) {
++ swsm = E1000_READ_REG(hw, E1000_SWSM);
++ if (!(swsm & E1000_SWSM_SMBI))
++ break;
++
++ usec_delay(50);
++ i++;
++ }
++
++ if (i == timeout) {
++ /* In rare circumstances, the SW semaphore may already be held
++ * unintentionally. Clear the semaphore once before giving up.
++ */
++ if (hw->dev_spec._82575.clear_semaphore_once) {
++ hw->dev_spec._82575.clear_semaphore_once = false;
++ e1000_put_hw_semaphore_generic(hw);
++ for (i = 0; i < timeout; i++) {
++ swsm = E1000_READ_REG(hw, E1000_SWSM);
++ if (!(swsm & E1000_SWSM_SMBI))
++ break;
++
++ usec_delay(50);
++ }
++ }
++
++ /* If we do not have the semaphore here, we have to give up. */
++ if (i == timeout) {
++ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
++ return -E1000_ERR_NVM;
++ }
++ }
++
++ /* Get the FW semaphore. */
++ for (i = 0; i < timeout; i++) {
++ swsm = E1000_READ_REG(hw, E1000_SWSM);
++ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
++
++ /* Semaphore acquired if bit latched */
++ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
++ break;
++
++ usec_delay(50);
++ }
++
++ if (i == timeout) {
++ /* Release semaphores */
++ e1000_put_hw_semaphore_generic(hw);
++ DEBUGOUT("Driver can't access the NVM\n");
++ return -E1000_ERR_NVM;
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the Shadow Ram to read
+ * @words: number of words to read
+@@ -204,28 +222,74 @@
+ * Reads a 16 bit word from the Shadow Ram using the EERD register.
+ * Uses necessary synchronization semaphores.
+ **/
+-static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+- u16 *data)
++s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
++ u16 *data)
+ {
+- s32 status = 0;
++ s32 status = E1000_SUCCESS;
+ u16 i, count;
+
++ DEBUGFUNC("e1000_read_nvm_srrd_i210");
++
+ /* We cannot hold synchronization semaphores for too long,
+ * because of forceful takeover procedure. However it is more efficient
+- * to read in bursts than synchronizing access for each word.
+- */
++ * to read in bursts than synchronizing access for each word. */
+ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+ E1000_EERD_EEWR_MAX_COUNT : (words - i);
+- if (!(hw->nvm.ops.acquire(hw))) {
+- status = igb_read_nvm_eerd(hw, offset, count,
++ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
++ status = e1000_read_nvm_eerd(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+- if (status)
++ if (status != E1000_SUCCESS)
++ break;
++ }
++
++ return status;
++}
++
++/**
++ * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
++ * @hw: pointer to the HW structure
++ * @offset: offset within the Shadow RAM to be written to
++ * @words: number of words to write
++ * @data: 16 bit word(s) to be written to the Shadow RAM
++ *
++ * Writes data to Shadow RAM at offset using EEWR register.
++ *
++ * If e1000_update_nvm_checksum is not called after this function , the
++ * data will not be committed to FLASH and also Shadow RAM will most likely
++ * contain an invalid checksum.
++ *
++ * If error code is returned, data and Shadow RAM may be inconsistent - buffer
++ * partially written.
++ **/
++s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
++ u16 *data)
++{
++ s32 status = E1000_SUCCESS;
++ u16 i, count;
++
++ DEBUGFUNC("e1000_write_nvm_srwr_i210");
++
++ /* We cannot hold synchronization semaphores for too long,
++ * because of forceful takeover procedure. However it is more efficient
++ * to write in bursts than synchronizing access for each word. */
++ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
++ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
++ E1000_EERD_EEWR_MAX_COUNT : (words - i);
++ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
++ status = e1000_write_nvm_srwr(hw, offset, count,
++ data + i);
++ hw->nvm.ops.release(hw);
++ } else {
++ status = E1000_ERR_SWFW_SYNC;
++ }
++
++ if (status != E1000_SUCCESS)
+ break;
+ }
+
+@@ -233,7 +297,7 @@
+ }
+
+ /**
+- * igb_write_nvm_srwr - Write to Shadow Ram using EEWR
++ * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow Ram to be written to
+ * @words: number of words to write
+@@ -241,23 +305,26 @@
+ *
+ * Writes data to Shadow Ram at offset using EEWR register.
+ *
+- * If igb_update_nvm_checksum is not called after this function , the
++ * If e1000_update_nvm_checksum is not called after this function , the
+ * Shadow Ram will most likely contain an invalid checksum.
+ **/
+-static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
++static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+ {
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, k, eewr = 0;
+ u32 attempts = 100000;
+- s32 ret_val = 0;
++ s32 ret_val = E1000_SUCCESS;
+
+- /* A check for invalid values: offset too large, too many words,
++ DEBUGFUNC("e1000_write_nvm_srwr");
++
++ /*
++ * A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+- hw_dbg("nvm parameter(s) out of bounds\n");
++ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+@@ -267,19 +334,19 @@
+ (data[i] << E1000_NVM_RW_REG_DATA) |
+ E1000_NVM_RW_REG_START;
+
+- wr32(E1000_SRWR, eewr);
++ E1000_WRITE_REG(hw, E1000_SRWR, eewr);
+
+ for (k = 0; k < attempts; k++) {
+ if (E1000_NVM_RW_REG_DONE &
+- rd32(E1000_SRWR)) {
+- ret_val = 0;
++ E1000_READ_REG(hw, E1000_SRWR)) {
++ ret_val = E1000_SUCCESS;
+ break;
+ }
+- udelay(5);
+- }
++ usec_delay(5);
++ }
+
+- if (ret_val) {
+- hw_dbg("Shadow RAM write EEWR timed out\n");
++ if (ret_val != E1000_SUCCESS) {
++ DEBUGOUT("Shadow RAM write EEWR timed out\n");
+ break;
+ }
+ }
+@@ -288,52 +355,7 @@
+ return ret_val;
+ }
+
+-/**
+- * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
+- * @hw: pointer to the HW structure
+- * @offset: offset within the Shadow RAM to be written to
+- * @words: number of words to write
+- * @data: 16 bit word(s) to be written to the Shadow RAM
+- *
+- * Writes data to Shadow RAM at offset using EEWR register.
+- *
+- * If e1000_update_nvm_checksum is not called after this function , the
+- * data will not be committed to FLASH and also Shadow RAM will most likely
+- * contain an invalid checksum.
+- *
+- * If error code is returned, data and Shadow RAM may be inconsistent - buffer
+- * partially written.
+- **/
+-static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+- u16 *data)
+-{
+- s32 status = 0;
+- u16 i, count;
+-
+- /* We cannot hold synchronization semaphores for too long,
+- * because of forceful takeover procedure. However it is more efficient
+- * to write in bursts than synchronizing access for each word.
+- */
+- for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+- count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+- E1000_EERD_EEWR_MAX_COUNT : (words - i);
+- if (!(hw->nvm.ops.acquire(hw))) {
+- status = igb_write_nvm_srwr(hw, offset, count,
+- data + i);
+- hw->nvm.ops.release(hw);
+- } else {
+- status = E1000_ERR_SWFW_SYNC;
+- }
+-
+- if (status)
+- break;
+- }
+-
+- return status;
+-}
+-
+-/**
+- * igb_read_invm_word_i210 - Reads OTP
++/** e1000_read_invm_word_i210 - Reads OTP
+ * @hw: pointer to the HW structure
+ * @address: the word address (aka eeprom offset) to read
+ * @data: pointer to the data read
+@@ -341,15 +363,17 @@
+ * Reads 16-bit words from the OTP. Return error when the word is not
+ * stored in OTP.
+ **/
+-static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
++static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
+ {
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u32 invm_dword;
+ u16 i;
+ u8 record_type, word_address;
+
++ DEBUGFUNC("e1000_read_invm_word_i210");
++
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+- invm_dword = rd32(E1000_INVM_DATA_REG(i));
++ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
+ /* Get record type */
+ record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
+ if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
+@@ -362,75 +386,76 @@
+ word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
+ if (word_address == address) {
+ *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
+- hw_dbg("Read INVM Word 0x%02x = %x\n",
++ DEBUGOUT2("Read INVM Word 0x%02x = %x",
+ address, *data);
+- status = 0;
++ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ }
+- if (status)
+- hw_dbg("Requested word 0x%02x not found in OTP\n", address);
++ if (status != E1000_SUCCESS)
++ DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
+ return status;
+ }
+
+-/**
+- * igb_read_invm_i210 - Read invm wrapper function for I210/I211
++/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211
+ * @hw: pointer to the HW structure
+- * @words: number of words to read
++ * @address: the word address (aka eeprom offset) to read
+ * @data: pointer to the data read
+ *
+ * Wrapper function to return data formerly found in the NVM.
+ **/
+-static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
+- u16 words __always_unused, u16 *data)
++static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
++ u16 E1000_UNUSEDARG words, u16 *data)
+ {
+- s32 ret_val = 0;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_read_invm_i210");
+
+ /* Only the MAC addr is required to be present in the iNVM */
+ switch (offset) {
+ case NVM_MAC_ADDR:
+- ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
+- ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
++ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1,
+ &data[1]);
+- ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
++ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2,
+ &data[2]);
+- if (ret_val)
+- hw_dbg("MAC Addr not found in iNVM\n");
++ if (ret_val != E1000_SUCCESS)
++ DEBUGOUT("MAC Addr not found in iNVM\n");
+ break;
+ case NVM_INIT_CTRL_2:
+- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+- if (ret_val) {
++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
++ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_2_DEFAULT_I211;
+- ret_val = 0;
++ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_INIT_CTRL_4:
+- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+- if (ret_val) {
++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
++ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_4_DEFAULT_I211;
+- ret_val = 0;
++ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_LED_1_CFG:
+- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+- if (ret_val) {
++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
++ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_1_CFG_DEFAULT_I211;
+- ret_val = 0;
++ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_LED_0_2_CFG:
+- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+- if (ret_val) {
++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
++ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_0_2_CFG_DEFAULT_I211;
+- ret_val = 0;
++ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_ID_LED_SETTINGS:
+- ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+- if (ret_val) {
++ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
++ if (ret_val != E1000_SUCCESS) {
+ *data = ID_LED_RESERVED_FFFF;
+- ret_val = 0;
++ ret_val = E1000_SUCCESS;
+ }
+ break;
+ case NVM_SUB_DEV_ID:
+@@ -446,7 +471,7 @@
+ *data = hw->vendor_id;
+ break;
+ default:
+- hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
++ DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
+ *data = NVM_RESERVED_WORD;
+ break;
+ }
+@@ -454,14 +479,15 @@
+ }
+
+ /**
+- * igb_read_invm_version - Reads iNVM version and image type
++ * e1000_read_invm_version - Reads iNVM version and image type
+ * @hw: pointer to the HW structure
+ * @invm_ver: version structure for the version read
+ *
+ * Reads iNVM version and image type.
+ **/
+-s32 igb_read_invm_version(struct e1000_hw *hw,
+- struct e1000_fw_version *invm_ver) {
++s32 e1000_read_invm_version(struct e1000_hw *hw,
++ struct e1000_fw_version *invm_ver)
++{
+ u32 *record = NULL;
+ u32 *next_record = NULL;
+ u32 i = 0;
+@@ -472,9 +498,11 @@
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u16 version = 0;
+
++ DEBUGFUNC("e1000_read_invm_version");
++
+ /* Read iNVM memory */
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+- invm_dword = rd32(E1000_INVM_DATA_REG(i));
++ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
+ buffer[i] = invm_dword;
+ }
+
+@@ -486,17 +514,18 @@
+ /* Check if we have first version location used */
+ if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
+ version = 0;
+- status = 0;
++ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have second version location used */
+ else if ((i == 1) &&
+ ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+- status = 0;
++ status = E1000_SUCCESS;
+ break;
+ }
+- /* Check if we have odd version location
++ /*
++ * Check if we have odd version location
+ * used and it is the last one used
+ */
+ else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
+@@ -504,21 +533,22 @@
+ (i != 1))) {
+ version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+ >> 13;
+- status = 0;
++ status = E1000_SUCCESS;
+ break;
+ }
+- /* Check if we have even version location
++ /*
++ * Check if we have even version location
+ * used and it is the last one used
+ */
+ else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
+ ((*record & 0x3) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+- status = 0;
++ status = E1000_SUCCESS;
+ break;
+ }
+ }
+
+- if (!status) {
++ if (status == E1000_SUCCESS) {
+ invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+ >> E1000_INVM_MAJOR_SHIFT;
+ invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+@@ -531,7 +561,7 @@
+ /* Check if we have image type in first location used */
+ if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
+ invm_ver->invm_img_type = 0;
+- status = 0;
++ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have image type in first location used */
+@@ -540,7 +570,7 @@
+ ((((*record & 0x3) != 0) && (i != 1)))) {
+ invm_ver->invm_img_type =
+ (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+- status = 0;
++ status = E1000_SUCCESS;
+ break;
+ }
+ }
+@@ -548,27 +578,30 @@
+ }
+
+ /**
+- * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
++ * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+-static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
++s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
+ {
+- s32 status = 0;
++ s32 status = E1000_SUCCESS;
+ s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
+
+- if (!(hw->nvm.ops.acquire(hw))) {
++ DEBUGFUNC("e1000_validate_nvm_checksum_i210");
++
++ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+
+- /* Replace the read function with semaphore grabbing with
++ /*
++ * Replace the read function with semaphore grabbing with
+ * the one that skips this for a while.
+ * We have semaphore taken already here.
+ */
+ read_op_ptr = hw->nvm.ops.read;
+- hw->nvm.ops.read = igb_read_nvm_eerd;
++ hw->nvm.ops.read = e1000_read_nvm_eerd;
+
+- status = igb_validate_nvm_checksum(hw);
++ status = e1000_validate_nvm_checksum_generic(hw);
+
+ /* Revert original read operation. */
+ hw->nvm.ops.read = read_op_ptr;
+@@ -581,147 +614,208 @@
+ return status;
+ }
+
++
+ /**
+- * igb_update_nvm_checksum_i210 - Update EEPROM checksum
++ * e1000_update_nvm_checksum_i210 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM. Next commit EEPROM data onto the Flash.
+ **/
+-static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
++s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
+ {
+- s32 ret_val = 0;
++ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+- /* Read the first word from the EEPROM. If this times out or fails, do
++ DEBUGFUNC("e1000_update_nvm_checksum_i210");
++
++ /*
++ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+- ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
+- if (ret_val) {
+- hw_dbg("EEPROM read failed\n");
++ ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
++ if (ret_val != E1000_SUCCESS) {
++ DEBUGOUT("EEPROM read failed\n");
+ goto out;
+ }
+
+- if (!(hw->nvm.ops.acquire(hw))) {
+- /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
++ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
++ /*
++ * Do not use hw->nvm.ops.write, hw->nvm.ops.read
+ * because we do not want to take the synchronization
+ * semaphores twice here.
+ */
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+- ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
++ ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw->nvm.ops.release(hw);
+- hw_dbg("NVM Read Error while updating checksum.\n");
++ DEBUGOUT("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+- ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
++ ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
+ &checksum);
+- if (ret_val) {
++ if (ret_val != E1000_SUCCESS) {
+ hw->nvm.ops.release(hw);
+- hw_dbg("NVM Write Error while updating checksum.\n");
++ DEBUGOUT("NVM Write Error while updating checksum.\n");
+ goto out;
+ }
+
+ hw->nvm.ops.release(hw);
+
+- ret_val = igb_update_flash_i210(hw);
++ ret_val = e1000_update_flash_i210(hw);
+ } else {
+- ret_val = -E1000_ERR_SWFW_SYNC;
++ ret_val = E1000_ERR_SWFW_SYNC;
++ }
++out:
++ return ret_val;
++}
++
++/**
++ * e1000_get_flash_presence_i210 - Check if flash device is detected.
++ * @hw: pointer to the HW structure
++ *
++ **/
++bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
++{
++ u32 eec = 0;
++ bool ret_val = false;
++
++ DEBUGFUNC("e1000_get_flash_presence_i210");
++
++ eec = E1000_READ_REG(hw, E1000_EECD);
++
++ if (eec & E1000_EECD_FLASH_DETECTED_I210)
++ ret_val = true;
++
++ return ret_val;
++}
++
++/**
++ * e1000_update_flash_i210 - Commit EEPROM to the flash
++ * @hw: pointer to the HW structure
++ *
++ **/
++s32 e1000_update_flash_i210(struct e1000_hw *hw)
++{
++ s32 ret_val;
++ u32 flup;
++
++ DEBUGFUNC("e1000_update_flash_i210");
++
++ ret_val = e1000_pool_flash_update_done_i210(hw);
++ if (ret_val == -E1000_ERR_NVM) {
++ DEBUGOUT("Flash update time out\n");
++ goto out;
+ }
++
++ flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
++ E1000_WRITE_REG(hw, E1000_EECD, flup);
++
++ ret_val = e1000_pool_flash_update_done_i210(hw);
++ if (ret_val == E1000_SUCCESS)
++ DEBUGOUT("Flash update complete\n");
++ else
++ DEBUGOUT("Flash update time out\n");
++
+ out:
+ return ret_val;
+ }
+
+ /**
+- * igb_pool_flash_update_done_i210 - Pool FLUDONE status.
++ * e1000_pool_flash_update_done_i210 - Pool FLUDONE status.
+ * @hw: pointer to the HW structure
+ *
+ **/
+-static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
++s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
+ {
+ s32 ret_val = -E1000_ERR_NVM;
+ u32 i, reg;
+
++ DEBUGFUNC("e1000_pool_flash_update_done_i210");
++
+ for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
+- reg = rd32(E1000_EECD);
++ reg = E1000_READ_REG(hw, E1000_EECD);
+ if (reg & E1000_EECD_FLUDONE_I210) {
+- ret_val = 0;
++ ret_val = E1000_SUCCESS;
+ break;
+ }
+- udelay(5);
++ usec_delay(5);
+ }
+
+ return ret_val;
+ }
+
+ /**
+- * igb_get_flash_presence_i210 - Check if flash device is detected.
++ * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
+ * @hw: pointer to the HW structure
+ *
++ * Initialize the i210/i211 NVM parameters and function pointers.
+ **/
+-bool igb_get_flash_presence_i210(struct e1000_hw *hw)
++static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
+ {
+- u32 eec = 0;
+- bool ret_val = false;
++ s32 ret_val;
++ struct e1000_nvm_info *nvm = &hw->nvm;
+
+- eec = rd32(E1000_EECD);
+- if (eec & E1000_EECD_FLASH_DETECTED_I210)
+- ret_val = true;
++ DEBUGFUNC("e1000_init_nvm_params_i210");
+
++ ret_val = e1000_init_nvm_params_82575(hw);
++ nvm->ops.acquire = e1000_acquire_nvm_i210;
++ nvm->ops.release = e1000_release_nvm_i210;
++ nvm->ops.valid_led_default = e1000_valid_led_default_i210;
++ if (e1000_get_flash_presence_i210(hw)) {
++ hw->nvm.type = e1000_nvm_flash_hw;
++ nvm->ops.read = e1000_read_nvm_srrd_i210;
++ nvm->ops.write = e1000_write_nvm_srwr_i210;
++ nvm->ops.validate = e1000_validate_nvm_checksum_i210;
++ nvm->ops.update = e1000_update_nvm_checksum_i210;
++ } else {
++ hw->nvm.type = e1000_nvm_invm;
++ nvm->ops.read = e1000_read_invm_i210;
++ nvm->ops.write = e1000_null_write_nvm;
++ nvm->ops.validate = e1000_null_ops_generic;
++ nvm->ops.update = e1000_null_ops_generic;
++ }
+ return ret_val;
+ }
+
+ /**
+- * igb_update_flash_i210 - Commit EEPROM to the flash
++ * e1000_init_function_pointers_i210 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
++ * Called to initialize all function pointers and parameters.
+ **/
+-static s32 igb_update_flash_i210(struct e1000_hw *hw)
++void e1000_init_function_pointers_i210(struct e1000_hw *hw)
+ {
+- s32 ret_val = 0;
+- u32 flup;
+-
+- ret_val = igb_pool_flash_update_done_i210(hw);
+- if (ret_val == -E1000_ERR_NVM) {
+- hw_dbg("Flash update time out\n");
+- goto out;
+- }
++ e1000_init_function_pointers_82575(hw);
++ hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
+
+- flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
+- wr32(E1000_EECD, flup);
+-
+- ret_val = igb_pool_flash_update_done_i210(hw);
+- if (ret_val)
+- hw_dbg("Flash update complete\n");
+- else
+- hw_dbg("Flash update time out\n");
+-
+-out:
+- return ret_val;
++ return;
+ }
+
+ /**
+- * igb_valid_led_default_i210 - Verify a valid default LED config
++ * e1000_valid_led_default_i210 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+-s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
++static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
+ {
+ s32 ret_val;
+
++ DEBUGFUNC("e1000_valid_led_default_i210");
++
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+- hw_dbg("NVM Read Error\n");
++ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+@@ -741,17 +835,19 @@
+ }
+
+ /**
+- * __igb_access_xmdio_reg - Read/write XMDIO register
++ * __e1000_access_xmdio_reg - Read/write XMDIO register
+ * @hw: pointer to the HW structure
+ * @address: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: pointer to value to read/write from/to the XMDIO address
+ * @read: boolean flag to indicate read or write
+ **/
+-static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
+- u8 dev_addr, u16 *data, bool read)
++static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
++ u8 dev_addr, u16 *data, bool read)
+ {
+- s32 ret_val = 0;
++ s32 ret_val;
++
++ DEBUGFUNC("__e1000_access_xmdio_reg");
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
+ if (ret_val)
+@@ -782,67 +878,41 @@
+ }
+
+ /**
+- * igb_read_xmdio_reg - Read XMDIO register
++ * e1000_read_xmdio_reg - Read XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be read from the EMI address
+ **/
+-s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
++s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
+ {
+- return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
++ DEBUGFUNC("e1000_read_xmdio_reg");
++
++ return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true);
+ }
+
+ /**
+- * igb_write_xmdio_reg - Write XMDIO register
++ * e1000_write_xmdio_reg - Write XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be written to the XMDIO address
+ **/
+-s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
+-{
+- return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
+-}
+-
+-/**
+- * igb_init_nvm_params_i210 - Init NVM func ptrs.
+- * @hw: pointer to the HW structure
+- **/
+-s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
++s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
+ {
+- s32 ret_val = 0;
+- struct e1000_nvm_info *nvm = &hw->nvm;
++ DEBUGFUNC("e1000_read_xmdio_reg");
+
+- nvm->ops.acquire = igb_acquire_nvm_i210;
+- nvm->ops.release = igb_release_nvm_i210;
+- nvm->ops.valid_led_default = igb_valid_led_default_i210;
+-
+- /* NVM Function Pointers */
+- if (igb_get_flash_presence_i210(hw)) {
+- hw->nvm.type = e1000_nvm_flash_hw;
+- nvm->ops.read = igb_read_nvm_srrd_i210;
+- nvm->ops.write = igb_write_nvm_srwr_i210;
+- nvm->ops.validate = igb_validate_nvm_checksum_i210;
+- nvm->ops.update = igb_update_nvm_checksum_i210;
+- } else {
+- hw->nvm.type = e1000_nvm_invm;
+- nvm->ops.read = igb_read_invm_i210;
+- nvm->ops.write = NULL;
+- nvm->ops.validate = NULL;
+- nvm->ops.update = NULL;
+- }
+- return ret_val;
++ return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false);
+ }
+
+ /**
+- * igb_pll_workaround_i210
++ * e1000_pll_workaround_i210
+ * @hw: pointer to the HW structure
+ *
+ * Works around an errata in the PLL circuit where it occasionally
+ * provides the wrong clock frequency after power up.
+ **/
+-s32 igb_pll_workaround_i210(struct e1000_hw *hw)
++static s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
+ {
+ s32 ret_val;
+ u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
+@@ -850,53 +920,104 @@
+ int i;
+
+ /* Get and set needed register values */
+- wuc = rd32(E1000_WUC);
+- mdicnfg = rd32(E1000_MDICNFG);
++ wuc = E1000_READ_REG(hw, E1000_WUC);
++ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
+ reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
+- wr32(E1000_MDICNFG, reg_val);
++ E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val);
+
+ /* Get data from NVM, or set default */
+- ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
+- &nvm_word);
+- if (ret_val)
++ ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
++ &nvm_word);
++ if (ret_val != E1000_SUCCESS)
+ nvm_word = E1000_INVM_DEFAULT_AL;
+ tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
+ for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
+ /* check current state directly from internal PHY */
+- igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
++ e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
+ E1000_PHY_PLL_FREQ_REG), &phy_word);
+ if ((phy_word & E1000_PHY_PLL_UNCONF)
+ != E1000_PHY_PLL_UNCONF) {
+- ret_val = 0;
++ ret_val = E1000_SUCCESS;
+ break;
+ } else {
+ ret_val = -E1000_ERR_PHY;
+ }
+ /* directly reset the internal PHY */
+- ctrl = rd32(E1000_CTRL);
+- wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
+
+- ctrl_ext = rd32(E1000_CTRL_EXT);
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
+- wr32(E1000_CTRL_EXT, ctrl_ext);
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+- wr32(E1000_WUC, 0);
++ E1000_WRITE_REG(hw, E1000_WUC, 0);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
+- wr32(E1000_EEARBC_I210, reg_val);
++ E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);
+
+- igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
++ e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ pci_word |= E1000_PCI_PMCSR_D3;
+- igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+- usleep_range(1000, 2000);
++ e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
++ msec_delay(1);
+ pci_word &= ~E1000_PCI_PMCSR_D3;
+- igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
++ e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
+- wr32(E1000_EEARBC_I210, reg_val);
++ E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);
+
+ /* restore WUC register */
+- wr32(E1000_WUC, wuc);
++ E1000_WRITE_REG(hw, E1000_WUC, wuc);
+ }
+ /* restore MDICNFG setting */
+- wr32(E1000_MDICNFG, mdicnfg);
++ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
++ return ret_val;
++}
++
++/**
++ * e1000_get_cfg_done_i210 - Read config done bit
++ * @hw: pointer to the HW structure
++ *
++ * Read the management control register for the config done bit for
++ * completion status. NOTE: silicon which is EEPROM-less will fail trying
++ * to read the config done bit, so an error is *ONLY* logged and returns
++ * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon
++ * would not be able to be reset or change link.
++ **/
++static s32 e1000_get_cfg_done_i210(struct e1000_hw *hw)
++{
++ s32 timeout = PHY_CFG_TIMEOUT;
++ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
++
++ DEBUGFUNC("e1000_get_cfg_done_i210");
++
++ while (timeout) {
++ if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask)
++ break;
++ msec_delay(1);
++ timeout--;
++ }
++ if (!timeout)
++ DEBUGOUT("MNG configuration cycle has not completed.\n");
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_init_hw_i210 - Init hw for I210/I211
++ * @hw: pointer to the HW structure
++ *
++ * Called to initialize hw for i210 hw family.
++ **/
++s32 e1000_init_hw_i210(struct e1000_hw *hw)
++{
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_init_hw_i210");
++ if ((hw->mac.type >= e1000_i210) &&
++ !(e1000_get_flash_presence_i210(hw))) {
++ ret_val = e1000_pll_workaround_i210(hw);
++ if (ret_val != E1000_SUCCESS)
++ return ret_val;
++ }
++ hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210;
++ ret_val = e1000_init_hw_82575(hw);
+ return ret_val;
+ }
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
+--- a/drivers/net/ethernet/intel/igb/e1000_i210.h 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_i210.h 2016-11-14 14:32:08.579567168 +0000
+@@ -1,39 +1,47 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
+
+ #ifndef _E1000_I210_H_
+ #define _E1000_I210_H_
+
+-s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+-void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+-s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+-s32 igb_read_invm_version(struct e1000_hw *hw,
+- struct e1000_fw_version *invm_ver);
+-s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
+-s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
+-s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+-bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+-s32 igb_pll_workaround_i210(struct e1000_hw *hw);
++bool e1000_get_flash_presence_i210(struct e1000_hw *hw);
++s32 e1000_update_flash_i210(struct e1000_hw *hw);
++s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw);
++s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw);
++s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
++ u16 words, u16 *data);
++s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
++ u16 words, u16 *data);
++s32 e1000_read_invm_version(struct e1000_hw *hw,
++ struct e1000_fw_version *invm_ver);
++s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
++void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
++s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
++ u16 *data);
++s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
++ u16 data);
++s32 e1000_init_hw_i210(struct e1000_hw *hw);
+
+ #define E1000_STM_OPCODE 0xDB00
+ #define E1000_EEPROM_FLASH_SIZE_WORD 0x11
+@@ -56,15 +64,15 @@
+
+ #define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
+ #define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
+-#define E1000_INVM_ULT_BYTES_SIZE 8
+-#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
+-#define E1000_INVM_VER_FIELD_ONE 0x1FF8
+-#define E1000_INVM_VER_FIELD_TWO 0x7FE000
+-#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
+-
+-#define E1000_INVM_MAJOR_MASK 0x3F0
+-#define E1000_INVM_MINOR_MASK 0xF
+-#define E1000_INVM_MAJOR_SHIFT 4
++#define E1000_INVM_ULT_BYTES_SIZE 8
++#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
++#define E1000_INVM_VER_FIELD_ONE 0x1FF8
++#define E1000_INVM_VER_FIELD_TWO 0x7FE000
++#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
++
++#define E1000_INVM_MAJOR_MASK 0x3F0
++#define E1000_INVM_MINOR_MASK 0xF
++#define E1000_INVM_MAJOR_SHIFT 4
+
+ #define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+@@ -73,7 +81,7 @@
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
+
+-/* NVM offset defaults for i211 device */
++/* NVM offset defaults for I211 devices */
+ #define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243
+ #define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1
+ #define NVM_LED_1_CFG_DEFAULT_I211 0x0184
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
+--- a/drivers/net/ethernet/intel/igb/e1000_mac.c 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_mac.c 2016-11-14 14:32:08.579567168 +0000
+@@ -1,68 +1,179 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include "e1000_api.h"
++
++static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
++static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
++static void e1000_config_collision_dist_generic(struct e1000_hw *hw);
++static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
++
++/**
++ * e1000_init_mac_ops_generic - Initialize MAC function pointers
++ * @hw: pointer to the HW structure
+ *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++ * Setups up the function pointers to no-op functions
++ **/
++void e1000_init_mac_ops_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ DEBUGFUNC("e1000_init_mac_ops_generic");
++
++ /* General Setup */
++ mac->ops.init_params = e1000_null_ops_generic;
++ mac->ops.init_hw = e1000_null_ops_generic;
++ mac->ops.reset_hw = e1000_null_ops_generic;
++ mac->ops.setup_physical_interface = e1000_null_ops_generic;
++ mac->ops.get_bus_info = e1000_null_ops_generic;
++ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie;
++ mac->ops.read_mac_addr = igb_e1000_read_mac_addr_generic;
++ mac->ops.config_collision_dist = e1000_config_collision_dist_generic;
++ mac->ops.clear_hw_cntrs = e1000_null_mac_generic;
++ /* LED */
++ mac->ops.cleanup_led = e1000_null_ops_generic;
++ mac->ops.setup_led = e1000_null_ops_generic;
++ mac->ops.blink_led = e1000_null_ops_generic;
++ mac->ops.led_on = e1000_null_ops_generic;
++ mac->ops.led_off = e1000_null_ops_generic;
++ /* LINK */
++ mac->ops.setup_link = e1000_null_ops_generic;
++ mac->ops.get_link_up_info = e1000_null_link_info;
++ mac->ops.check_for_link = e1000_null_ops_generic;
++ /* Management */
++ mac->ops.check_mng_mode = e1000_null_mng_mode;
++ /* VLAN, MC, etc. */
++ mac->ops.update_mc_addr_list = e1000_null_update_mc;
++ mac->ops.clear_vfta = e1000_null_mac_generic;
++ mac->ops.write_vfta = e1000_null_write_vfta;
++ mac->ops.rar_set = e1000_rar_set_generic;
++ mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
++}
++
++/**
++ * e1000_null_ops_generic - No-op function, returns 0
++ * @hw: pointer to the HW structure
++ **/
++s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw)
++{
++ DEBUGFUNC("e1000_null_ops_generic");
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_null_mac_generic - No-op function, return void
++ * @hw: pointer to the HW structure
++ **/
++void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw)
++{
++ DEBUGFUNC("e1000_null_mac_generic");
++ return;
++}
+
+-#include
+-#include
+-#include
+-#include
+-#include
++/**
++ * e1000_null_link_info - No-op function, return 0
++ * @hw: pointer to the HW structure
++ **/
++s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw,
++ u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d)
++{
++ DEBUGFUNC("e1000_null_link_info");
++ return E1000_SUCCESS;
++}
+
+-#include "e1000_mac.h"
++/**
++ * e1000_null_mng_mode - No-op function, return false
++ * @hw: pointer to the HW structure
++ **/
++bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw)
++{
++ DEBUGFUNC("e1000_null_mng_mode");
++ return false;
++}
+
+-#include "igb.h"
++/**
++ * e1000_null_update_mc - No-op function, return void
++ * @hw: pointer to the HW structure
++ **/
++void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw,
++ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
++{
++ DEBUGFUNC("e1000_null_update_mc");
++ return;
++}
+
+-static s32 igb_set_default_fc(struct e1000_hw *hw);
+-static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
++/**
++ * e1000_null_write_vfta - No-op function, return void
++ * @hw: pointer to the HW structure
++ **/
++void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw,
++ u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b)
++{
++ DEBUGFUNC("e1000_null_write_vfta");
++ return;
++}
+
+ /**
+- * igb_get_bus_info_pcie - Get PCIe bus information
++ * e1000_null_rar_set - No-op function, return 0
++ * @hw: pointer to the HW structure
++ **/
++int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw,
++ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
++{
++ DEBUGFUNC("e1000_null_rar_set");
++ return E1000_SUCCESS;
++}
++
++/**
++ * igb_e1000_get_bus_info_pcie_generic - Get PCIe bus information
+ * @hw: pointer to the HW structure
+ *
+ * Determines and stores the system bus information for a particular
+ * network interface. The following bus information is determined and stored:
+ * bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+-s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
++s32 igb_e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
+ {
++ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_bus_info *bus = &hw->bus;
+ s32 ret_val;
+- u32 reg;
+ u16 pcie_link_status;
+
++ DEBUGFUNC("igb_e1000_get_bus_info_pcie_generic");
++
+ bus->type = e1000_bus_type_pci_express;
+
+- ret_val = igb_read_pcie_cap_reg(hw,
+- PCI_EXP_LNKSTA,
+- &pcie_link_status);
++ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS,
++ &pcie_link_status);
+ if (ret_val) {
+ bus->width = e1000_bus_width_unknown;
+ bus->speed = e1000_bus_speed_unknown;
+ } else {
+- switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
+- case PCI_EXP_LNKSTA_CLS_2_5GB:
++ switch (pcie_link_status & PCIE_LINK_SPEED_MASK) {
++ case PCIE_LINK_SPEED_2500:
+ bus->speed = e1000_bus_speed_2500;
+ break;
+- case PCI_EXP_LNKSTA_CLS_5_0GB:
++ case PCIE_LINK_SPEED_5000:
+ bus->speed = e1000_bus_speed_5000;
+ break;
+ default:
+@@ -71,75 +182,70 @@
+ }
+
+ bus->width = (enum e1000_bus_width)((pcie_link_status &
+- PCI_EXP_LNKSTA_NLW) >>
+- PCI_EXP_LNKSTA_NLW_SHIFT);
++ PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT);
+ }
+
+- reg = rd32(E1000_STATUS);
+- bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
++ mac->ops.set_lan_id(hw);
+
+- return 0;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_clear_vfta - Clear VLAN filter table
++ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
++ *
+ * @hw: pointer to the HW structure
+ *
+- * Clears the register array which contains the VLAN filter table by
+- * setting all the values to 0.
++ * Determines the LAN function id by reading memory-mapped registers
++ * and swaps the port value if requested.
+ **/
+-void igb_clear_vfta(struct e1000_hw *hw)
++static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+ {
+- u32 offset;
++ struct e1000_bus_info *bus = &hw->bus;
++ u32 reg;
+
+- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+- array_wr32(E1000_VFTA, offset, 0);
+- wrfl();
+- }
++ /* The status register reports the correct function number
++ * for the device regardless of function swap state.
++ */
++ reg = E1000_READ_REG(hw, E1000_STATUS);
++ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+ }
+
+ /**
+- * igb_write_vfta - Write value to VLAN filter table
++ * igb_e1000_set_lan_id_single_port - Set LAN id for a single port device
+ * @hw: pointer to the HW structure
+- * @offset: register offset in VLAN filter table
+- * @value: register value written to VLAN filter table
+ *
+- * Writes value at the given offset in the register array which stores
+- * the VLAN filter table.
++ * Sets the LAN function id to zero for a single port device.
+ **/
+-static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
++/* Changed name, duplicated with e1000 */
++void igb_e1000_set_lan_id_single_port(struct e1000_hw *hw)
+ {
+- array_wr32(E1000_VFTA, offset, value);
+- wrfl();
+-}
++ struct e1000_bus_info *bus = &hw->bus;
+
+-/* Due to a hw errata, if the host tries to configure the VFTA register
+- * while performing queries from the BMC or DMA, then the VFTA in some
+- * cases won't be written.
+- */
++ bus->func = 0;
++}
+
+ /**
+- * igb_clear_vfta_i350 - Clear VLAN filter table
++ * igb_e1000_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+-void igb_clear_vfta_i350(struct e1000_hw *hw)
++/* Changed name, duplicated with e1000 */
++void igb_e1000_clear_vfta_generic(struct e1000_hw *hw)
+ {
+ u32 offset;
+- int i;
+
+- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+- for (i = 0; i < 10; i++)
+- array_wr32(E1000_VFTA, offset, 0);
++ DEBUGFUNC("igb_e1000_clear_vfta_generic");
+
+- wrfl();
++ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
++ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
++ E1000_WRITE_FLUSH(hw);
+ }
+ }
+
+ /**
+- * igb_write_vfta_i350 - Write value to VLAN filter table
++ * igb_e1000_write_vfta_generic - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+@@ -147,113 +253,85 @@
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+-static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
++/* Changed name, duplicated with e1000 */
++void igb_e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+ {
+- int i;
+-
+- for (i = 0; i < 10; i++)
+- array_wr32(E1000_VFTA, offset, value);
++ DEBUGFUNC("igb_e1000_write_vfta_generic");
+
+- wrfl();
++ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
++ E1000_WRITE_FLUSH(hw);
+ }
+
+ /**
+- * igb_init_rx_addrs - Initialize receive address's
++ * e1000_init_rx_addrs_generic - Initialize receive address's
+ * @hw: pointer to the HW structure
+ * @rar_count: receive address registers
+ *
+- * Setups the receive address registers by setting the base receive address
++ * Setup the receive address registers by setting the base receive address
+ * register to the devices MAC address and clearing all the other receive
+ * address registers to 0.
+ **/
+-void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
++void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
+ {
+ u32 i;
+- u8 mac_addr[ETH_ALEN] = {0};
++ u8 mac_addr[ETH_ADDR_LEN] = {0};
++
++ DEBUGFUNC("e1000_init_rx_addrs_generic");
+
+ /* Setup the receive address */
+- hw_dbg("Programming MAC Address into RAR[0]\n");
++ DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+ hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+ /* Zero out the other (rar_entry_count - 1) receive addresses */
+- hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
++ DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
+ for (i = 1; i < rar_count; i++)
+ hw->mac.ops.rar_set(hw, mac_addr, i);
+ }
+
+ /**
+- * igb_vfta_set - enable or disable vlan in VLAN filter table
+- * @hw: pointer to the HW structure
+- * @vid: VLAN id to add or remove
+- * @add: if true add filter, if false remove
+- *
+- * Sets or clears a bit in the VLAN filter table array based on VLAN id
+- * and if we are adding or removing the filter
+- **/
+-s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
+-{
+- u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
+- u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+- u32 vfta;
+- struct igb_adapter *adapter = hw->back;
+- s32 ret_val = 0;
+-
+- vfta = adapter->shadow_vfta[index];
+-
+- /* bit was set/cleared before we started */
+- if ((!!(vfta & mask)) == add) {
+- ret_val = -E1000_ERR_CONFIG;
+- } else {
+- if (add)
+- vfta |= mask;
+- else
+- vfta &= ~mask;
+- }
+- if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
+- igb_write_vfta_i350(hw, index, vfta);
+- else
+- igb_write_vfta(hw, index, vfta);
+- adapter->shadow_vfta[index] = vfta;
+-
+- return ret_val;
+-}
+-
+-/**
+- * igb_check_alt_mac_addr - Check for alternate MAC addr
++ * igb_e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the nvm for an alternate MAC address. An alternate MAC address
+ * can be setup by pre-boot software and must be treated like a permanent
+- * address and must override the actual permanent MAC address. If an
+- * alternate MAC address is found it is saved in the hw struct and
+- * programmed into RAR0 and the function returns success, otherwise the
+- * function returns an error.
++ * address and must override the actual permanent MAC address. If an
++ * alternate MAC address is found it is programmed into RAR0, replacing
++ * the permanent address that was installed into RAR0 by the Si on reset.
++ * This function will return SUCCESS unless it encounters an error while
++ * reading the EEPROM.
+ **/
+-s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+ {
+ u32 i;
+- s32 ret_val = 0;
++ s32 ret_val;
+ u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+- u8 alt_mac_addr[ETH_ALEN];
++ u8 alt_mac_addr[ETH_ADDR_LEN];
++
++ DEBUGFUNC("igb_e1000_check_alt_mac_addr_generic");
++
++ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
++ if (ret_val)
++ return ret_val;
+
+ /* Alternate MAC address is handled by the option ROM for 82580
+ * and newer. SW support not required.
+ */
+ if (hw->mac.type >= e1000_82580)
+- goto out;
++ return E1000_SUCCESS;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+- &nvm_alt_mac_addr_offset);
++ &nvm_alt_mac_addr_offset);
+ if (ret_val) {
+- hw_dbg("NVM Read Error\n");
+- goto out;
++ DEBUGOUT("NVM Read Error\n");
++ return ret_val;
+ }
+
+ if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+ (nvm_alt_mac_addr_offset == 0x0000))
+ /* There is no Alternate MAC Address */
+- goto out;
++ return E1000_SUCCESS;
+
+ if (hw->bus.func == E1000_FUNC_1)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+@@ -262,12 +340,12 @@
+
+ if (hw->bus.func == E1000_FUNC_3)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
+- for (i = 0; i < ETH_ALEN; i += 2) {
++ for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+ offset = nvm_alt_mac_addr_offset + (i >> 1);
+ ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+- hw_dbg("NVM Read Error\n");
+- goto out;
++ DEBUGOUT("NVM Read Error\n");
++ return ret_val;
+ }
+
+ alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+@@ -275,9 +353,9 @@
+ }
+
+ /* if multicast bit is set, the alternate address will not be used */
+- if (is_multicast_ether_addr(alt_mac_addr)) {
+- hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+- goto out;
++ if (alt_mac_addr[0] & 0x01) {
++ DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
++ return E1000_SUCCESS;
+ }
+
+ /* We have a valid alternate MAC address, and we want to treat it the
+@@ -286,12 +364,11 @@
+ */
+ hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_rar_set - Set receive address register
++ * e1000_rar_set_generic - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+@@ -299,16 +376,17 @@
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
+ **/
+-void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
++static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+ {
+ u32 rar_low, rar_high;
+
++ DEBUGFUNC("e1000_rar_set_generic");
++
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+- rar_low = ((u32) addr[0] |
+- ((u32) addr[1] << 8) |
+- ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
++ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
++ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+@@ -320,60 +398,29 @@
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+- wr32(E1000_RAL(index), rar_low);
+- wrfl();
+- wr32(E1000_RAH(index), rar_high);
+- wrfl();
+-}
++ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
++ E1000_WRITE_FLUSH(hw);
++ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
++ E1000_WRITE_FLUSH(hw);
+
+-/**
+- * igb_mta_set - Set multicast filter table address
+- * @hw: pointer to the HW structure
+- * @hash_value: determines the MTA register and bit to set
+- *
+- * The multicast table address is a register array of 32-bit registers.
+- * The hash_value is used to determine what register the bit is in, the
+- * current value is read, the new bit is OR'd in and the new value is
+- * written back into the register.
+- **/
+-void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
+-{
+- u32 hash_bit, hash_reg, mta;
+-
+- /* The MTA is a register array of 32-bit registers. It is
+- * treated like an array of (32*mta_reg_count) bits. We want to
+- * set bit BitArray[hash_value]. So we figure out what register
+- * the bit is in, read it, OR in the new bit, then write
+- * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
+- * mask to bits 31:5 of the hash value which gives us the
+- * register we're modifying. The hash bit within that register
+- * is determined by the lower 5 bits of the hash value.
+- */
+- hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+- hash_bit = hash_value & 0x1F;
+-
+- mta = array_rd32(E1000_MTA, hash_reg);
+-
+- mta |= (1 << hash_bit);
+-
+- array_wr32(E1000_MTA, hash_reg, mta);
+- wrfl();
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_hash_mc_addr - Generate a multicast hash value
++ * e1000_hash_mc_addr_generic - Generate a multicast hash value
+ * @hw: pointer to the HW structure
+ * @mc_addr: pointer to a multicast address
+ *
+ * Generates a multicast address hash value which is used to determine
+- * the multicast filter table array address and new table value. See
+- * igb_mta_set()
++ * the multicast filter table array address and new table value.
+ **/
+-static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
++u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
+ {
+ u32 hash_value, hash_mask;
+ u8 bit_shift = 0;
+
++ DEBUGFUNC("e1000_hash_mc_addr_generic");
++
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+@@ -401,7 +448,7 @@
+ * values resulting from each mc_filter_type...
+ * [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+- * LSB MSB
++ * LSB MSB
+ *
+ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+@@ -430,7 +477,7 @@
+ }
+
+ /**
+- * igb_update_mc_addr_list - Update Multicast addresses
++ * e1000_update_mc_addr_list_generic - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+@@ -438,150 +485,412 @@
+ * Updates entire Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+-void igb_update_mc_addr_list(struct e1000_hw *hw,
+- u8 *mc_addr_list, u32 mc_addr_count)
++void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
++ u8 *mc_addr_list, u32 mc_addr_count)
+ {
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
+
++ DEBUGFUNC("e1000_update_mc_addr_list_generic");
++
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32) i < mc_addr_count; i++) {
+- hash_value = igb_hash_mc_addr(hw, mc_addr_list);
++ hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
+
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+- mc_addr_list += (ETH_ALEN);
++ mc_addr_list += (ETH_ADDR_LEN);
+ }
+
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+- array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
+- wrfl();
++ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
++ E1000_WRITE_FLUSH(hw);
+ }
+
+ /**
+- * igb_clear_hw_cntrs_base - Clear base hardware counters
++ * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value
++ * @hw: pointer to the HW structure
++ *
++ * In certain situations, a system BIOS may report that the PCIx maximum
++ * memory read byte count (MMRBC) value is higher than than the actual
++ * value. We check the PCIx command register with the current PCIx status
++ * register.
++ **/
++void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw)
++{
++ u16 cmd_mmrbc;
++ u16 pcix_cmd;
++ u16 pcix_stat_hi_word;
++ u16 stat_mmrbc;
++
++ DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic");
++
++ /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */
++ if (hw->bus.type != e1000_bus_type_pcix)
++ return;
++
++ e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
++ e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word);
++ cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >>
++ PCIX_COMMAND_MMRBC_SHIFT;
++ stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
++ PCIX_STATUS_HI_MMRBC_SHIFT;
++ if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
++ stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
++ if (cmd_mmrbc > stat_mmrbc) {
++ pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK;
++ pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
++ e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
++ }
++}
++
++/**
++ * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the base hardware counters by reading the counter registers.
+ **/
+-void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
++void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
+ {
+- rd32(E1000_CRCERRS);
+- rd32(E1000_SYMERRS);
+- rd32(E1000_MPC);
+- rd32(E1000_SCC);
+- rd32(E1000_ECOL);
+- rd32(E1000_MCC);
+- rd32(E1000_LATECOL);
+- rd32(E1000_COLC);
+- rd32(E1000_DC);
+- rd32(E1000_SEC);
+- rd32(E1000_RLEC);
+- rd32(E1000_XONRXC);
+- rd32(E1000_XONTXC);
+- rd32(E1000_XOFFRXC);
+- rd32(E1000_XOFFTXC);
+- rd32(E1000_FCRUC);
+- rd32(E1000_GPRC);
+- rd32(E1000_BPRC);
+- rd32(E1000_MPRC);
+- rd32(E1000_GPTC);
+- rd32(E1000_GORCL);
+- rd32(E1000_GORCH);
+- rd32(E1000_GOTCL);
+- rd32(E1000_GOTCH);
+- rd32(E1000_RNBC);
+- rd32(E1000_RUC);
+- rd32(E1000_RFC);
+- rd32(E1000_ROC);
+- rd32(E1000_RJC);
+- rd32(E1000_TORL);
+- rd32(E1000_TORH);
+- rd32(E1000_TOTL);
+- rd32(E1000_TOTH);
+- rd32(E1000_TPR);
+- rd32(E1000_TPT);
+- rd32(E1000_MPTC);
+- rd32(E1000_BPTC);
++ DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
++
++ E1000_READ_REG(hw, E1000_CRCERRS);
++ E1000_READ_REG(hw, E1000_SYMERRS);
++ E1000_READ_REG(hw, E1000_MPC);
++ E1000_READ_REG(hw, E1000_SCC);
++ E1000_READ_REG(hw, E1000_ECOL);
++ E1000_READ_REG(hw, E1000_MCC);
++ E1000_READ_REG(hw, E1000_LATECOL);
++ E1000_READ_REG(hw, E1000_COLC);
++ E1000_READ_REG(hw, E1000_DC);
++ E1000_READ_REG(hw, E1000_SEC);
++ E1000_READ_REG(hw, E1000_RLEC);
++ E1000_READ_REG(hw, E1000_XONRXC);
++ E1000_READ_REG(hw, E1000_XONTXC);
++ E1000_READ_REG(hw, E1000_XOFFRXC);
++ E1000_READ_REG(hw, E1000_XOFFTXC);
++ E1000_READ_REG(hw, E1000_FCRUC);
++ E1000_READ_REG(hw, E1000_GPRC);
++ E1000_READ_REG(hw, E1000_BPRC);
++ E1000_READ_REG(hw, E1000_MPRC);
++ E1000_READ_REG(hw, E1000_GPTC);
++ E1000_READ_REG(hw, E1000_GORCL);
++ E1000_READ_REG(hw, E1000_GORCH);
++ E1000_READ_REG(hw, E1000_GOTCL);
++ E1000_READ_REG(hw, E1000_GOTCH);
++ E1000_READ_REG(hw, E1000_RNBC);
++ E1000_READ_REG(hw, E1000_RUC);
++ E1000_READ_REG(hw, E1000_RFC);
++ E1000_READ_REG(hw, E1000_ROC);
++ E1000_READ_REG(hw, E1000_RJC);
++ E1000_READ_REG(hw, E1000_TORL);
++ E1000_READ_REG(hw, E1000_TORH);
++ E1000_READ_REG(hw, E1000_TOTL);
++ E1000_READ_REG(hw, E1000_TOTH);
++ E1000_READ_REG(hw, E1000_TPR);
++ E1000_READ_REG(hw, E1000_TPT);
++ E1000_READ_REG(hw, E1000_MPTC);
++ E1000_READ_REG(hw, E1000_BPTC);
+ }
+
+ /**
+- * igb_check_for_copper_link - Check for link (Copper)
++ * e1000_check_for_copper_link_generic - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see of the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+-s32 igb_check_for_copper_link(struct e1000_hw *hw)
++s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
+ {
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+
++ DEBUGFUNC("e1000_check_for_copper_link");
++
+ /* We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+- if (!mac->get_link_status) {
+- ret_val = 0;
+- goto out;
+- }
++ if (!mac->get_link_status)
++ return E1000_SUCCESS;
+
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+- ret_val = igb_phy_has_link(hw, 1, 0, &link);
++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ if (!link)
+- goto out; /* No link detected */
++ return E1000_SUCCESS; /* No link detected */
+
+ mac->get_link_status = false;
+
+ /* Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+- igb_check_downshift(hw);
++ e1000_check_downshift_generic(hw);
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+- if (!mac->autoneg) {
+- ret_val = -E1000_ERR_CONFIG;
+- goto out;
+- }
++ if (!mac->autoneg)
++ return -E1000_ERR_CONFIG;
+
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+- igb_config_collision_dist(hw);
++ mac->ops.config_collision_dist(hw);
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+- ret_val = igb_config_fc_after_link_up(hw);
++ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val)
+- hw_dbg("Error configuring flow control\n");
++ DEBUGOUT("Error configuring flow control\n");
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_setup_link - Setup flow control and link settings
++ * e1000_check_for_fiber_link_generic - Check for link (Fiber)
++ * @hw: pointer to the HW structure
++ *
++ * Checks for link up on the hardware. If link is not up and we have
++ * a signal, then we need to force link up.
++ **/
++s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ u32 rxcw;
++ u32 ctrl;
++ u32 status;
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_check_for_fiber_link_generic");
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ status = E1000_READ_REG(hw, E1000_STATUS);
++ rxcw = E1000_READ_REG(hw, E1000_RXCW);
++
++ /* If we don't have link (auto-negotiation failed or link partner
++ * cannot auto-negotiate), the cable is plugged in (we have signal),
++ * and our link partner is not trying to auto-negotiate with us (we
++ * are receiving idles or data), we need to force link up. We also
++ * need to give auto-negotiation time to complete, in case the cable
++ * was just plugged in. The autoneg_failed flag does this.
++ */
++ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
++ if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) &&
++ !(rxcw & E1000_RXCW_C)) {
++ if (!mac->autoneg_failed) {
++ mac->autoneg_failed = true;
++ return E1000_SUCCESS;
++ }
++ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
++
++ /* Disable auto-negotiation in the TXCW register */
++ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
++
++ /* Force link-up and also force full-duplex. */
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++
++ /* Configure Flow Control after forcing link up. */
++ ret_val = e1000_config_fc_after_link_up_generic(hw);
++ if (ret_val) {
++ DEBUGOUT("Error configuring flow control\n");
++ return ret_val;
++ }
++ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
++ /* If we are forcing link and we are receiving /C/ ordered
++ * sets, re-enable auto-negotiation in the TXCW register
++ * and disable forced link in the Device Control register
++ * in an attempt to auto-negotiate with our link partner.
++ */
++ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
++ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
++ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
++
++ mac->serdes_has_link = true;
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_check_for_serdes_link_generic - Check for link (Serdes)
++ * @hw: pointer to the HW structure
++ *
++ * Checks for link up on the hardware. If link is not up and we have
++ * a signal, then we need to force link up.
++ **/
++s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ u32 rxcw;
++ u32 ctrl;
++ u32 status;
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_check_for_serdes_link_generic");
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ status = E1000_READ_REG(hw, E1000_STATUS);
++ rxcw = E1000_READ_REG(hw, E1000_RXCW);
++
++ /* If we don't have link (auto-negotiation failed or link partner
++ * cannot auto-negotiate), and our link partner is not trying to
++ * auto-negotiate with us (we are receiving idles or data),
++ * we need to force link up. We also need to give auto-negotiation
++ * time to complete.
++ */
++ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
++ if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) {
++ if (!mac->autoneg_failed) {
++ mac->autoneg_failed = true;
++ return E1000_SUCCESS;
++ }
++ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
++
++ /* Disable auto-negotiation in the TXCW register */
++ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
++
++ /* Force link-up and also force full-duplex. */
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++
++ /* Configure Flow Control after forcing link up. */
++ ret_val = e1000_config_fc_after_link_up_generic(hw);
++ if (ret_val) {
++ DEBUGOUT("Error configuring flow control\n");
++ return ret_val;
++ }
++ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
++ /* If we are forcing link and we are receiving /C/ ordered
++ * sets, re-enable auto-negotiation in the TXCW register
++ * and disable forced link in the Device Control register
++ * in an attempt to auto-negotiate with our link partner.
++ */
++ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
++ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
++ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
++
++ mac->serdes_has_link = true;
++ } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
++ /* If we force link for non-auto-negotiation switch, check
++ * link status based on MAC synchronization for internal
++ * serdes media type.
++ */
++ /* SYNCH bit and IV bit are sticky. */
++ usec_delay(10);
++ rxcw = E1000_READ_REG(hw, E1000_RXCW);
++ if (rxcw & E1000_RXCW_SYNCH) {
++ if (!(rxcw & E1000_RXCW_IV)) {
++ mac->serdes_has_link = true;
++ DEBUGOUT("SERDES: Link up - forced.\n");
++ }
++ } else {
++ mac->serdes_has_link = false;
++ DEBUGOUT("SERDES: Link down - force failed.\n");
++ }
++ }
++
++ if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
++ status = E1000_READ_REG(hw, E1000_STATUS);
++ if (status & E1000_STATUS_LU) {
++ /* SYNCH bit and IV bit are sticky, so reread rxcw. */
++ usec_delay(10);
++ rxcw = E1000_READ_REG(hw, E1000_RXCW);
++ if (rxcw & E1000_RXCW_SYNCH) {
++ if (!(rxcw & E1000_RXCW_IV)) {
++ mac->serdes_has_link = true;
++ DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n");
++ } else {
++ mac->serdes_has_link = false;
++ DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n");
++ }
++ } else {
++ mac->serdes_has_link = false;
++ DEBUGOUT("SERDES: Link down - no sync.\n");
++ }
++ } else {
++ mac->serdes_has_link = false;
++ DEBUGOUT("SERDES: Link down - autoneg failed\n");
++ }
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_set_default_fc_generic - Set flow control default values
++ * @hw: pointer to the HW structure
++ *
++ * Read the EEPROM for the default values for flow control and store the
++ * values.
++ **/
++static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
++{
++ s32 ret_val;
++ u16 nvm_data;
++ u16 nvm_offset = 0;
++
++ DEBUGFUNC("e1000_set_default_fc_generic");
++
++ /* Read and store word 0x0F of the EEPROM. This word contains bits
++ * that determine the hardware's default PAUSE (flow control) mode,
++ * a bit that determines whether the HW defaults to enabling or
++ * disabling auto-negotiation, and the direction of the
++ * SW defined pins. If there is no SW over-ride of the flow
++ * control setting, then the variable hw->fc will
++ * be initialized based on a value in the EEPROM.
++ */
++ if (hw->mac.type == e1000_i350) {
++ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
++ ret_val = hw->nvm.ops.read(hw,
++ NVM_INIT_CONTROL2_REG +
++ nvm_offset,
++ 1, &nvm_data);
++ } else {
++ ret_val = hw->nvm.ops.read(hw,
++ NVM_INIT_CONTROL2_REG,
++ 1, &nvm_data);
++ }
++
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ return ret_val;
++ }
++
++ if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
++ hw->fc.requested_mode = e1000_fc_none;
++ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
++ NVM_WORD0F_ASM_DIR)
++ hw->fc.requested_mode = e1000_fc_tx_pause;
++ else
++ hw->fc.requested_mode = e1000_fc_full;
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_setup_link_generic - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+@@ -590,91 +899,260 @@
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+-s32 igb_setup_link(struct e1000_hw *hw)
++s32 e1000_setup_link_generic(struct e1000_hw *hw)
+ {
+- s32 ret_val = 0;
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_setup_link_generic");
+
+ /* In the case of the phy reset being blocked, we already have a link.
+ * We do not need to set it up again.
+ */
+- if (igb_check_reset_block(hw))
+- goto out;
++ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
++ return E1000_SUCCESS;
+
+ /* If requested flow control is set to default, set flow control
+ * based on the EEPROM flow control settings.
+ */
+ if (hw->fc.requested_mode == e1000_fc_default) {
+- ret_val = igb_set_default_fc(hw);
++ ret_val = e1000_set_default_fc_generic(hw);
+ if (ret_val)
+- goto out;
++ return ret_val;
+ }
+
+- /* We want to save off the original Flow Control configuration just
+- * in case we get disconnected and then reconnected into a different
+- * hub or switch with different Flow Control capabilities.
++ /* Save off the requested flow control mode for use later. Depending
++ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+- hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
++ DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
++ hw->fc.current_mode);
+
+ /* Call the necessary media_type subroutine to configure the link. */
+ ret_val = hw->mac.ops.setup_physical_interface(hw);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ /* Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+- hw_dbg("Initializing the Flow Control address, type and timer regs\n");
+- wr32(E1000_FCT, FLOW_CONTROL_TYPE);
+- wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+- wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
++ DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
++ E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
++ E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
++ E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
++
++ E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+- wr32(E1000_FCTTV, hw->fc.pause_time);
++ return e1000_set_fc_watermarks_generic(hw);
++}
+
+- ret_val = igb_set_fc_watermarks(hw);
++/**
++ * e1000_commit_fc_settings_generic - Configure flow control
++ * @hw: pointer to the HW structure
++ *
++ * Write the flow control settings to the Transmit Config Word Register (TXCW)
++ * base on the flow control settings in e1000_mac_info.
++ **/
++static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ u32 txcw;
+
+-out:
++ DEBUGFUNC("e1000_commit_fc_settings_generic");
++
++ /* Check for a software override of the flow control settings, and
++ * setup the device accordingly. If auto-negotiation is enabled, then
++ * software will have to set the "PAUSE" bits to the correct value in
++ * the Transmit Config Word Register (TXCW) and re-start auto-
++ * negotiation. However, if auto-negotiation is disabled, then
++ * software will have to manually configure the two flow control enable
++ * bits in the CTRL register.
++ *
++ * The possible values of the "fc" parameter are:
++ * 0: Flow control is completely disabled
++ * 1: Rx flow control is enabled (we can receive pause frames,
++ * but not send pause frames).
++ * 2: Tx flow control is enabled (we can send pause frames but we
++ * do not support receiving pause frames).
++ * 3: Both Rx and Tx flow control (symmetric) are enabled.
++ */
++ switch (hw->fc.current_mode) {
++ case e1000_fc_none:
++ /* Flow control completely disabled by a software over-ride. */
++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
++ break;
++ case e1000_fc_rx_pause:
++ /* Rx Flow control is enabled and Tx Flow control is disabled
++ * by a software over-ride. Since there really isn't a way to
++ * advertise that we are capable of Rx Pause ONLY, we will
++ * advertise that we support both symmetric and asymmetric Rx
++ * PAUSE. Later, we will disable the adapter's ability to send
++ * PAUSE frames.
++ */
++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
++ break;
++ case e1000_fc_tx_pause:
++ /* Tx Flow control is enabled, and Rx Flow control is disabled,
++ * by a software over-ride.
++ */
++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
++ break;
++ case e1000_fc_full:
++ /* Flow control (both Rx and Tx) is enabled by a software
++ * over-ride.
++ */
++ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
++ break;
++ default:
++ DEBUGOUT("Flow control param set incorrectly\n");
++ return -E1000_ERR_CONFIG;
++ break;
++ }
++
++ E1000_WRITE_REG(hw, E1000_TXCW, txcw);
++ mac->txcw = txcw;
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_poll_fiber_serdes_link_generic - Poll for link up
++ * @hw: pointer to the HW structure
++ *
++ * Polls for link up by reading the status register, if link fails to come
++ * up with auto-negotiation, then the link is forced if a signal is detected.
++ **/
++static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++ u32 i, status;
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
++
++ /* If we have a signal (the cable is plugged in, or assumed true for
++ * serdes media) then poll for a "Link-Up" indication in the Device
++ * Status Register. Time-out if a link isn't seen in 500 milliseconds
++ * seconds (Auto-negotiation should complete in less than 500
++ * milliseconds even if the other end is doing it in SW).
++ */
++ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
++ msec_delay(10);
++ status = E1000_READ_REG(hw, E1000_STATUS);
++ if (status & E1000_STATUS_LU)
++ break;
++ }
++ if (i == FIBER_LINK_UP_LIMIT) {
++ DEBUGOUT("Never got a valid link from auto-neg!!!\n");
++ mac->autoneg_failed = true;
++ /* AutoNeg failed to achieve a link, so we'll call
++ * mac->check_for_link. This routine will force the
++ * link up if we detect a signal. This will allow us to
++ * communicate with non-autonegotiating link partners.
++ */
++ ret_val = mac->ops.check_for_link(hw);
++ if (ret_val) {
++ DEBUGOUT("Error while checking for link\n");
++ return ret_val;
++ }
++ mac->autoneg_failed = false;
++ } else {
++ mac->autoneg_failed = false;
++ DEBUGOUT("Valid Link Found\n");
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
++ * @hw: pointer to the HW structure
++ *
++ * Configures collision distance and flow control for fiber and serdes
++ * links. Upon successful setup, poll for link.
++ **/
++s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
++{
++ u32 ctrl;
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++
++ /* Take the link out of reset */
++ ctrl &= ~E1000_CTRL_LRST;
++
++ hw->mac.ops.config_collision_dist(hw);
++
++ ret_val = e1000_commit_fc_settings_generic(hw);
++ if (ret_val)
++ return ret_val;
++
++ /* Since auto-negotiation is enabled, take the link out of reset (the
++ * link will be in reset, because we previously reset the chip). This
++ * will restart auto-negotiation. If auto-negotiation is successful
++ * then the link-up status bit will be set and the flow control enable
++ * bits (RFCE and TFCE) will be set according to their negotiated value.
++ */
++ DEBUGOUT("Auto-negotiation enabled\n");
++
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++ E1000_WRITE_FLUSH(hw);
++ msec_delay(1);
++
++ /* For these adapters, the SW definable pin 1 is set when the optics
++ * detect a signal. If we have a signal, then poll for a "Link-Up"
++ * indication.
++ */
++ if (hw->phy.media_type == e1000_media_type_internal_serdes ||
++ (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
++ ret_val = e1000_poll_fiber_serdes_link_generic(hw);
++ } else {
++ DEBUGOUT("No signal detected\n");
++ }
+
+ return ret_val;
+ }
+
+ /**
+- * igb_config_collision_dist - Configure collision distance
++ * e1000_config_collision_dist_generic - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+- * during link setup. Currently no func pointer exists and all
+- * implementations are handled in the generic version of this function.
++ * during link setup.
+ **/
+-void igb_config_collision_dist(struct e1000_hw *hw)
++static void e1000_config_collision_dist_generic(struct e1000_hw *hw)
+ {
+ u32 tctl;
+
+- tctl = rd32(E1000_TCTL);
++ DEBUGFUNC("e1000_config_collision_dist_generic");
++
++ tctl = E1000_READ_REG(hw, E1000_TCTL);
+
+ tctl &= ~E1000_TCTL_COLD;
+ tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+- wr32(E1000_TCTL, tctl);
+- wrfl();
++ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
++ E1000_WRITE_FLUSH(hw);
+ }
+
+ /**
+- * igb_set_fc_watermarks - Set flow control high/low watermarks
++ * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers. If
+ * flow control XON frame transmission is enabled, then set XON frame
+- * tansmission as well.
++ * transmission as well.
+ **/
+-static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
++s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
+ {
+- s32 ret_val = 0;
+ u32 fcrtl = 0, fcrth = 0;
+
++ DEBUGFUNC("e1000_set_fc_watermarks_generic");
++
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+@@ -692,61 +1170,14 @@
+
+ fcrth = hw->fc.high_water;
+ }
+- wr32(E1000_FCRTL, fcrtl);
+- wr32(E1000_FCRTH, fcrth);
++ E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
++ E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
+
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_set_default_fc - Set flow control default values
+- * @hw: pointer to the HW structure
+- *
+- * Read the EEPROM for the default values for flow control and store the
+- * values.
+- **/
+-static s32 igb_set_default_fc(struct e1000_hw *hw)
+-{
+- s32 ret_val = 0;
+- u16 lan_offset;
+- u16 nvm_data;
+-
+- /* Read and store word 0x0F of the EEPROM. This word contains bits
+- * that determine the hardware's default PAUSE (flow control) mode,
+- * a bit that determines whether the HW defaults to enabling or
+- * disabling auto-negotiation, and the direction of the
+- * SW defined pins. If there is no SW over-ride of the flow
+- * control setting, then the variable hw->fc will
+- * be initialized based on a value in the EEPROM.
+- */
+- if (hw->mac.type == e1000_i350) {
+- lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
+- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
+- + lan_offset, 1, &nvm_data);
+- } else {
+- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
+- 1, &nvm_data);
+- }
+-
+- if (ret_val) {
+- hw_dbg("NVM Read Error\n");
+- goto out;
+- }
+-
+- if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+- hw->fc.requested_mode = e1000_fc_none;
+- else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+- NVM_WORD0F_ASM_DIR)
+- hw->fc.requested_mode = e1000_fc_tx_pause;
+- else
+- hw->fc.requested_mode = e1000_fc_full;
+-
+-out:
+- return ret_val;
+-}
+-
+-/**
+- * igb_force_mac_fc - Force the MAC's flow control settings
++ * e1000_force_mac_fc_generic - Force the MAC's flow control settings
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
+@@ -755,12 +1186,13 @@
+ * autonegotiation is managed by the PHY rather than the MAC. Software must
+ * also configure these bits when link is forced on a fiber connection.
+ **/
+-s32 igb_force_mac_fc(struct e1000_hw *hw)
++s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
+ {
+ u32 ctrl;
+- s32 ret_val = 0;
+
+- ctrl = rd32(E1000_CTRL);
++ DEBUGFUNC("e1000_force_mac_fc_generic");
++
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+@@ -776,10 +1208,10 @@
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * frames but we do not receive pause frames).
+- * 3: Both Rx and TX flow control (symmetric) is enabled.
++ * 3: Both Rx and Tx flow control (symmetric) is enabled.
+ * other: No other values should be possible at this point.
+ */
+- hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
++ DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+@@ -797,19 +1229,17 @@
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+- hw_dbg("Flow control param set incorrectly\n");
+- ret_val = -E1000_ERR_CONFIG;
+- goto out;
++ DEBUGOUT("Flow control param set incorrectly\n");
++ return -E1000_ERR_CONFIG;
+ }
+
+- wr32(E1000_CTRL, ctrl);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_config_fc_after_link_up - Configures flow control after link
++ * e1000_config_fc_after_link_up_generic - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+@@ -818,29 +1248,32 @@
+ * and did not fail, then we configure flow control based on our link
+ * partner.
+ **/
+-s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
++s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
+ {
+ struct e1000_mac_info *mac = &hw->mac;
+- s32 ret_val = 0;
++ s32 ret_val = E1000_SUCCESS;
+ u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
+ u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+ u16 speed, duplex;
+
++ DEBUGFUNC("e1000_config_fc_after_link_up_generic");
++
+ /* Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (mac->autoneg_failed) {
+- if (hw->phy.media_type == e1000_media_type_internal_serdes)
+- ret_val = igb_force_mac_fc(hw);
++ if (hw->phy.media_type == e1000_media_type_fiber ||
++ hw->phy.media_type == e1000_media_type_internal_serdes)
++ ret_val = e1000_force_mac_fc_generic(hw);
+ } else {
+ if (hw->phy.media_type == e1000_media_type_copper)
+- ret_val = igb_force_mac_fc(hw);
++ ret_val = e1000_force_mac_fc_generic(hw);
+ }
+
+ if (ret_val) {
+- hw_dbg("Error forcing flow control settings\n");
+- goto out;
++ DEBUGOUT("Error forcing flow control settings\n");
++ return ret_val;
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+@@ -853,18 +1286,16 @@
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+- &mii_status_reg);
++ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+- goto out;
+- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+- &mii_status_reg);
++ return ret_val;
++ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+- hw_dbg("Copper PHY and Auto Neg has not completed.\n");
+- goto out;
++ DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
++ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+@@ -874,13 +1305,13 @@
+ * flow control was negotiated.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+- &mii_nway_adv_reg);
++ &mii_nway_adv_reg);
+ if (ret_val)
+- goto out;
++ return ret_val;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+- &mii_nway_lp_ability_reg);
++ &mii_nway_lp_ability_reg);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+@@ -917,18 +1348,18 @@
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+- /* Now we need to check if the user selected RX ONLY
++ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+- * FULL flow control because we could not advertise RX
++ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+- * turn OFF the TRANSMISSION of PAUSE frames.
++ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+- hw_dbg("Flow Control = FULL.\n");
++ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+- hw_dbg("Flow Control = RX PAUSE frames only.\n");
++ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+@@ -943,7 +1374,7 @@
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+- hw_dbg("Flow Control = TX PAUSE frames only.\n");
++ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+@@ -957,46 +1388,23 @@
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+- hw_dbg("Flow Control = RX PAUSE frames only.\n");
+- }
+- /* Per the IEEE spec, at this point flow control should be
+- * disabled. However, we want to consider that we could
+- * be connected to a legacy switch that doesn't advertise
+- * desired flow control, but can be forced on the link
+- * partner. So if we advertised no flow control, that is
+- * what we will resolve to. If we advertised some kind of
+- * receive capability (Rx Pause Only or Full Flow Control)
+- * and the link partner advertised none, we will configure
+- * ourselves to enable Rx Flow Control only. We can do
+- * this safely for two reasons: If the link partner really
+- * didn't want flow control enabled, and we enable Rx, no
+- * harm done since we won't be receiving any PAUSE frames
+- * anyway. If the intent on the link partner was to have
+- * flow control enabled, then by us enabling RX only, we
+- * can at least receive pause frames and process them.
+- * This is a good idea because in most cases, since we are
+- * predominantly a server NIC, more times than not we will
+- * be asked to delay transmission of packets than asking
+- * our link partner to pause transmission of frames.
+- */
+- else if ((hw->fc.requested_mode == e1000_fc_none) ||
+- (hw->fc.requested_mode == e1000_fc_tx_pause) ||
+- (hw->fc.strict_ieee)) {
+- hw->fc.current_mode = e1000_fc_none;
+- hw_dbg("Flow Control = NONE.\n");
++ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+- hw->fc.current_mode = e1000_fc_rx_pause;
+- hw_dbg("Flow Control = RX PAUSE frames only.\n");
++ /* Per the IEEE spec, at this point flow control
++ * should be disabled.
++ */
++ hw->fc.current_mode = e1000_fc_none;
++ DEBUGOUT("Flow Control = NONE.\n");
+ }
+
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+- ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
++ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
+ if (ret_val) {
+- hw_dbg("Error getting link speed and duplex\n");
+- goto out;
++ DEBUGOUT("Error getting link speed and duplex\n");
++ return ret_val;
+ }
+
+ if (duplex == HALF_DUPLEX)
+@@ -1005,26 +1413,27 @@
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+- ret_val = igb_force_mac_fc(hw);
++ ret_val = e1000_force_mac_fc_generic(hw);
+ if (ret_val) {
+- hw_dbg("Error forcing flow control settings\n");
+- goto out;
++ DEBUGOUT("Error forcing flow control settings\n");
++ return ret_val;
+ }
+ }
++
+ /* Check for the case where we have SerDes media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+- if ((hw->phy.media_type == e1000_media_type_internal_serdes)
+- && mac->autoneg) {
++ if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
++ mac->autoneg) {
+ /* Read the PCS_LSTS and check to see if AutoNeg
+ * has completed.
+ */
+- pcs_status_reg = rd32(E1000_PCS_LSTAT);
++ pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+
+ if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+- hw_dbg("PCS Auto Neg has not completed.\n");
++ DEBUGOUT("PCS Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+@@ -1034,8 +1443,8 @@
+ * Page Ability Register (PCS_LPAB) to determine how
+ * flow control was negotiated.
+ */
+- pcs_adv_reg = rd32(E1000_PCS_ANADV);
+- pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
++ pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
++ pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB);
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (PCS_ANADV) and two bits in the Auto Negotiation Base
+@@ -1080,10 +1489,10 @@
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+- hw_dbg("Flow Control = FULL.\n");
++ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+- hw_dbg("Flow Control = Rx PAUSE frames only.\n");
++ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+@@ -1098,7 +1507,7 @@
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+- hw_dbg("Flow Control = Tx PAUSE frames only.\n");
++ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+@@ -1112,35 +1521,34 @@
+ !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+- hw_dbg("Flow Control = Rx PAUSE frames only.\n");
++ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+- hw_dbg("Flow Control = NONE.\n");
++ DEBUGOUT("Flow Control = NONE.\n");
+ }
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+- pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
++ pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+ pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+- wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
++ E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg);
+
+- ret_val = igb_force_mac_fc(hw);
++ ret_val = e1000_force_mac_fc_generic(hw);
+ if (ret_val) {
+- hw_dbg("Error forcing flow control settings\n");
++ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
++ * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+@@ -1148,172 +1556,185 @@
+ * Read the status register for the current speed/duplex and store the current
+ * speed and duplex for copper connections.
+ **/
+-s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+- u16 *duplex)
++s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
++ u16 *duplex)
+ {
+ u32 status;
+
+- status = rd32(E1000_STATUS);
++ DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
++
++ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+- hw_dbg("1000 Mbs, ");
++ DEBUGOUT("1000 Mbs, ");
+ } else if (status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+- hw_dbg("100 Mbs, ");
++ DEBUGOUT("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+- hw_dbg("10 Mbs, ");
++ DEBUGOUT("10 Mbs, ");
+ }
+
+ if (status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+- hw_dbg("Full Duplex\n");
++ DEBUGOUT("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+- hw_dbg("Half Duplex\n");
++ DEBUGOUT("Half Duplex\n");
+ }
+
+- return 0;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_get_hw_semaphore - Acquire hardware semaphore
++ * e1000_get_speed_and_duplex_fiber_generic - Retrieve current speed/duplex
++ * @hw: pointer to the HW structure
++ * @speed: stores the current speed
++ * @duplex: stores the current duplex
++ *
++ * Sets the speed and duplex to gigabit full duplex (the only possible option)
++ * for fiber/serdes links.
++ **/
++s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw,
++ u16 *speed, u16 *duplex)
++{
++ DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
++
++ *speed = SPEED_1000;
++ *duplex = FULL_DUPLEX;
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_get_hw_semaphore_generic - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+-s32 igb_get_hw_semaphore(struct e1000_hw *hw)
++s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
+ {
+ u32 swsm;
+- s32 ret_val = 0;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
++ DEBUGFUNC("e1000_get_hw_semaphore_generic");
++
+ /* Get the SW semaphore */
+ while (i < timeout) {
+- swsm = rd32(E1000_SWSM);
++ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+- udelay(50);
++ usec_delay(50);
+ i++;
+ }
+
+ if (i == timeout) {
+- hw_dbg("Driver can't access device - SMBI bit is set.\n");
+- ret_val = -E1000_ERR_NVM;
+- goto out;
++ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
++ return -E1000_ERR_NVM;
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+- swsm = rd32(E1000_SWSM);
+- wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
++ swsm = E1000_READ_REG(hw, E1000_SWSM);
++ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+- if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
++ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+- udelay(50);
++ usec_delay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+- igb_put_hw_semaphore(hw);
+- hw_dbg("Driver can't access the NVM\n");
+- ret_val = -E1000_ERR_NVM;
+- goto out;
++ e1000_put_hw_semaphore_generic(hw);
++ DEBUGOUT("Driver can't access the NVM\n");
++ return -E1000_ERR_NVM;
+ }
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_put_hw_semaphore - Release hardware semaphore
++ * e1000_put_hw_semaphore_generic - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ **/
+-void igb_put_hw_semaphore(struct e1000_hw *hw)
++void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
+ {
+ u32 swsm;
+
+- swsm = rd32(E1000_SWSM);
++ DEBUGFUNC("e1000_put_hw_semaphore_generic");
++
++ swsm = E1000_READ_REG(hw, E1000_SWSM);
+
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+
+- wr32(E1000_SWSM, swsm);
++ E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+ }
+
+ /**
+- * igb_get_auto_rd_done - Check for auto read completion
++ * e1000_get_auto_rd_done_generic - Check for auto read completion
+ * @hw: pointer to the HW structure
+ *
+ * Check EEPROM for Auto Read done bit.
+ **/
+-s32 igb_get_auto_rd_done(struct e1000_hw *hw)
++s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
+ {
+ s32 i = 0;
+- s32 ret_val = 0;
+
++ DEBUGFUNC("e1000_get_auto_rd_done_generic");
+
+ while (i < AUTO_READ_DONE_TIMEOUT) {
+- if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
++ if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
+ break;
+- usleep_range(1000, 2000);
++ msec_delay(1);
+ i++;
+ }
+
+ if (i == AUTO_READ_DONE_TIMEOUT) {
+- hw_dbg("Auto read by HW from NVM has not completed.\n");
+- ret_val = -E1000_ERR_RESET;
+- goto out;
++ DEBUGOUT("Auto read by HW from NVM has not completed.\n");
++ return -E1000_ERR_RESET;
+ }
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_valid_led_default - Verify a valid default LED config
++ * e1000_valid_led_default_generic - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+-static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
++s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
+ {
+ s32 ret_val;
+
++ DEBUGFUNC("e1000_valid_led_default_generic");
++
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+- hw_dbg("NVM Read Error\n");
+- goto out;
++ DEBUGOUT("NVM Read Error\n");
++ return ret_val;
+ }
+
+- if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+- switch (hw->phy.media_type) {
+- case e1000_media_type_internal_serdes:
+- *data = ID_LED_DEFAULT_82575_SERDES;
+- break;
+- case e1000_media_type_copper:
+- default:
+- *data = ID_LED_DEFAULT;
+- break;
+- }
+- }
+-out:
+- return ret_val;
++ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
++ *data = ID_LED_DEFAULT;
++
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_id_led_init -
++ * e1000_id_led_init_generic -
+ * @hw: pointer to the HW structure
+ *
+ **/
+-s32 igb_id_led_init(struct e1000_hw *hw)
++s32 e1000_id_led_init_generic(struct e1000_hw *hw)
+ {
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+@@ -1323,17 +1744,13 @@
+ u16 data, i, temp;
+ const u16 led_mask = 0x0F;
+
+- /* i210 and i211 devices have different LED mechanism */
+- if ((hw->mac.type == e1000_i210) ||
+- (hw->mac.type == e1000_i211))
+- ret_val = igb_valid_led_default_i210(hw, &data);
+- else
+- ret_val = igb_valid_led_default(hw, &data);
++ DEBUGFUNC("e1000_id_led_init_generic");
+
++ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- mac->ledctl_default = rd32(E1000_LEDCTL);
++ mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
+@@ -1375,34 +1792,69 @@
+ }
+ }
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_cleanup_led - Set LED config to default operation
++ * e1000_setup_led_generic - Configures SW controllable LED
++ * @hw: pointer to the HW structure
++ *
++ * This prepares the SW controllable LED for use and saves the current state
++ * of the LED so it can be later restored.
++ **/
++s32 e1000_setup_led_generic(struct e1000_hw *hw)
++{
++ u32 ledctl;
++
++ DEBUGFUNC("e1000_setup_led_generic");
++
++ if (hw->mac.ops.setup_led != e1000_setup_led_generic)
++ return -E1000_ERR_CONFIG;
++
++ if (hw->phy.media_type == e1000_media_type_fiber) {
++ ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
++ hw->mac.ledctl_default = ledctl;
++ /* Turn off LED0 */
++ ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
++ E1000_LEDCTL_LED0_MODE_MASK);
++ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
++ E1000_LEDCTL_LED0_MODE_SHIFT);
++ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
++ } else if (hw->phy.media_type == e1000_media_type_copper) {
++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_cleanup_led_generic - Set LED config to default operation
+ * @hw: pointer to the HW structure
+ *
+ * Remove the current LED configuration and set the LED configuration
+ * to the default value, saved from the EEPROM.
+ **/
+-s32 igb_cleanup_led(struct e1000_hw *hw)
++s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
+ {
+- wr32(E1000_LEDCTL, hw->mac.ledctl_default);
+- return 0;
++ DEBUGFUNC("e1000_cleanup_led_generic");
++
++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_blink_led - Blink LED
++ * e1000_blink_led_generic - Blink LED
+ * @hw: pointer to the HW structure
+ *
+- * Blink the led's which are set to be on.
++ * Blink the LEDs which are set to be on.
+ **/
+-s32 igb_blink_led(struct e1000_hw *hw)
++s32 e1000_blink_led_generic(struct e1000_hw *hw)
+ {
+ u32 ledctl_blink = 0;
+ u32 i;
+
++ DEBUGFUNC("e1000_blink_led_generic");
++
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ /* always blink LED0 for PCI-E fiber */
+ ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+@@ -1432,100 +1884,239 @@
+ }
+ }
+
+- wr32(E1000_LEDCTL, ledctl_blink);
++ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_led_on_generic - Turn LED on
++ * @hw: pointer to the HW structure
++ *
++ * Turn LED on.
++ **/
++s32 e1000_led_on_generic(struct e1000_hw *hw)
++{
++ u32 ctrl;
++
++ DEBUGFUNC("e1000_led_on_generic");
++
++ switch (hw->phy.media_type) {
++ case e1000_media_type_fiber:
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl &= ~E1000_CTRL_SWDPIN0;
++ ctrl |= E1000_CTRL_SWDPIO0;
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++ break;
++ case e1000_media_type_copper:
++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
++ break;
++ default:
++ break;
++ }
+
+- return 0;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_led_off - Turn LED off
++ * e1000_led_off_generic - Turn LED off
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED off.
+ **/
+-s32 igb_led_off(struct e1000_hw *hw)
++s32 e1000_led_off_generic(struct e1000_hw *hw)
+ {
++ u32 ctrl;
++
++ DEBUGFUNC("e1000_led_off_generic");
++
+ switch (hw->phy.media_type) {
++ case e1000_media_type_fiber:
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ ctrl |= E1000_CTRL_SWDPIN0;
++ ctrl |= E1000_CTRL_SWDPIO0;
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++ break;
+ case e1000_media_type_copper:
+- wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
++ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+ break;
+ default:
+ break;
+ }
+
+- return 0;
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
++ * @hw: pointer to the HW structure
++ * @no_snoop: bitmap of snoop events
++ *
++ * Set the PCI-express register to snoop for events enabled in 'no_snoop'.
++ **/
++void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
++{
++ u32 gcr;
++
++ DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
++
++ if (hw->bus.type != e1000_bus_type_pci_express)
++ return;
++
++ if (no_snoop) {
++ gcr = E1000_READ_REG(hw, E1000_GCR);
++ gcr &= ~(PCIE_NO_SNOOP_ALL);
++ gcr |= no_snoop;
++ E1000_WRITE_REG(hw, E1000_GCR, gcr);
++ }
+ }
+
+ /**
+- * igb_disable_pcie_master - Disables PCI-express master access
++ * e1000_disable_pcie_master_generic - Disables PCI-express master access
+ * @hw: pointer to the HW structure
+ *
+- * Returns 0 (0) if successful, else returns -10
++ * Returns E1000_SUCCESS if successful, else returns -10
+ * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
+ * the master requests to be disabled.
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests.
+ **/
+-s32 igb_disable_pcie_master(struct e1000_hw *hw)
++s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
+ {
+ u32 ctrl;
+ s32 timeout = MASTER_DISABLE_TIMEOUT;
+- s32 ret_val = 0;
++
++ DEBUGFUNC("e1000_disable_pcie_master_generic");
+
+ if (hw->bus.type != e1000_bus_type_pci_express)
+- goto out;
++ return E1000_SUCCESS;
+
+- ctrl = rd32(E1000_CTRL);
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+- wr32(E1000_CTRL, ctrl);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ while (timeout) {
+- if (!(rd32(E1000_STATUS) &
+- E1000_STATUS_GIO_MASTER_ENABLE))
++ if (!(E1000_READ_REG(hw, E1000_STATUS) &
++ E1000_STATUS_GIO_MASTER_ENABLE) ||
++ E1000_REMOVED(hw->hw_addr))
+ break;
+- udelay(100);
++ usec_delay(100);
+ timeout--;
+ }
+
+ if (!timeout) {
+- hw_dbg("Master requests are pending.\n");
+- ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
+- goto out;
++ DEBUGOUT("Master requests are pending.\n");
++ return -E1000_ERR_MASTER_REQUESTS_PENDING;
+ }
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_validate_mdi_setting - Verify MDI/MDIx settings
++ * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
+ * @hw: pointer to the HW structure
+ *
+- * Verify that when not using auto-negotitation that MDI/MDIx is correctly
+- * set, which is forced to MDI mode only.
++ * Reset the Adaptive Interframe Spacing throttle to default values.
+ **/
+-s32 igb_validate_mdi_setting(struct e1000_hw *hw)
++void e1000_reset_adaptive_generic(struct e1000_hw *hw)
+ {
+- s32 ret_val = 0;
++ struct e1000_mac_info *mac = &hw->mac;
+
+- /* All MDI settings are supported on 82580 and newer. */
+- if (hw->mac.type >= e1000_82580)
+- goto out;
++ DEBUGFUNC("e1000_reset_adaptive_generic");
++
++ if (!mac->adaptive_ifs) {
++ DEBUGOUT("Not in Adaptive IFS mode!\n");
++ return;
++ }
++
++ mac->current_ifs_val = 0;
++ mac->ifs_min_val = IFS_MIN;
++ mac->ifs_max_val = IFS_MAX;
++ mac->ifs_step_size = IFS_STEP;
++ mac->ifs_ratio = IFS_RATIO;
++
++ mac->in_ifs_mode = false;
++ E1000_WRITE_REG(hw, E1000_AIT, 0);
++}
++
++/**
++ * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
++ * @hw: pointer to the HW structure
++ *
++ * Update the Adaptive Interframe Spacing Throttle value based on the
++ * time between transmitted packets and time between collisions.
++ **/
++void e1000_update_adaptive_generic(struct e1000_hw *hw)
++{
++ struct e1000_mac_info *mac = &hw->mac;
++
++ DEBUGFUNC("e1000_update_adaptive_generic");
++
++ if (!mac->adaptive_ifs) {
++ DEBUGOUT("Not in Adaptive IFS mode!\n");
++ return;
++ }
++
++ if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
++ if (mac->tx_packet_delta > MIN_NUM_XMITS) {
++ mac->in_ifs_mode = true;
++ if (mac->current_ifs_val < mac->ifs_max_val) {
++ if (!mac->current_ifs_val)
++ mac->current_ifs_val = mac->ifs_min_val;
++ else
++ mac->current_ifs_val +=
++ mac->ifs_step_size;
++ E1000_WRITE_REG(hw, E1000_AIT,
++ mac->current_ifs_val);
++ }
++ }
++ } else {
++ if (mac->in_ifs_mode &&
++ (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
++ mac->current_ifs_val = 0;
++ mac->in_ifs_mode = false;
++ E1000_WRITE_REG(hw, E1000_AIT, 0);
++ }
++ }
++}
++
++/**
++ * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
++ * @hw: pointer to the HW structure
++ *
++ * Verify that when not using auto-negotiation that MDI/MDIx is correctly
++ * set, which is forced to MDI mode only.
++ **/
++static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
++{
++ DEBUGFUNC("e1000_validate_mdi_setting_generic");
+
+ if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
+- hw_dbg("Invalid MDI setting detected\n");
++ DEBUGOUT("Invalid MDI setting detected\n");
+ hw->phy.mdix = 1;
+- ret_val = -E1000_ERR_CONFIG;
+- goto out;
++ return -E1000_ERR_CONFIG;
+ }
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_write_8bit_ctrl_reg - Write a 8bit CTRL register
++ * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings
++ * @hw: pointer to the HW structure
++ *
++ * Validate the MDI/MDIx setting, allowing for auto-crossover during forced
++ * operation.
++ **/
++s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw)
++{
++ DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic");
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register
+ * @hw: pointer to the HW structure
+ * @reg: 32bit register offset such as E1000_SCTL
+ * @offset: register offset to write to
+@@ -1535,72 +2126,28 @@
+ * and they all have the format address << 8 | data and bit 31 is polled for
+ * completion.
+ **/
+-s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+- u32 offset, u8 data)
++s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
++ u32 offset, u8 data)
+ {
+ u32 i, regvalue = 0;
+- s32 ret_val = 0;
++
++ DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
+
+ /* Set up the address and data */
+ regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
+- wr32(reg, regvalue);
++ E1000_WRITE_REG(hw, reg, regvalue);
+
+ /* Poll the ready bit to see if the MDI read completed */
+ for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
+- udelay(5);
+- regvalue = rd32(reg);
++ usec_delay(5);
++ regvalue = E1000_READ_REG(hw, reg);
+ if (regvalue & E1000_GEN_CTL_READY)
+ break;
+ }
+ if (!(regvalue & E1000_GEN_CTL_READY)) {
+- hw_dbg("Reg %08x did not indicate ready\n", reg);
+- ret_val = -E1000_ERR_PHY;
+- goto out;
+- }
+-
+-out:
+- return ret_val;
+-}
+-
+-/**
+- * igb_enable_mng_pass_thru - Enable processing of ARP's
+- * @hw: pointer to the HW structure
+- *
+- * Verifies the hardware needs to leave interface enabled so that frames can
+- * be directed to and from the management interface.
+- **/
+-bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
+-{
+- u32 manc;
+- u32 fwsm, factps;
+- bool ret_val = false;
+-
+- if (!hw->mac.asf_firmware_present)
+- goto out;
+-
+- manc = rd32(E1000_MANC);
+-
+- if (!(manc & E1000_MANC_RCV_TCO_EN))
+- goto out;
+-
+- if (hw->mac.arc_subsystem_valid) {
+- fwsm = rd32(E1000_FWSM);
+- factps = rd32(E1000_FACTPS);
+-
+- if (!(factps & E1000_FACTPS_MNGCG) &&
+- ((fwsm & E1000_FWSM_MODE_MASK) ==
+- (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
+- ret_val = true;
+- goto out;
+- }
+- } else {
+- if ((manc & E1000_MANC_SMBUS_EN) &&
+- !(manc & E1000_MANC_ASF_EN)) {
+- ret_val = true;
+- goto out;
+- }
++ DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
++ return -E1000_ERR_PHY;
+ }
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
+--- a/drivers/net/ethernet/intel/igb/e1000_mac.h 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_mac.h 2016-11-14 14:32:08.579567168 +0000
+@@ -1,87 +1,81 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
+
+-#ifndef _E1000_MAC_H_
+-#define _E1000_MAC_H_
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
+
+-#include "e1000_hw.h"
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
+
+-#include "e1000_phy.h"
+-#include "e1000_nvm.h"
+-#include "e1000_defines.h"
+-#include "e1000_i210.h"
+-
+-/* Functions that should not be called directly from drivers but can be used
+- * by other files in this 'shared code'
+- */
+-s32 igb_blink_led(struct e1000_hw *hw);
+-s32 igb_check_for_copper_link(struct e1000_hw *hw);
+-s32 igb_cleanup_led(struct e1000_hw *hw);
+-s32 igb_config_fc_after_link_up(struct e1000_hw *hw);
+-s32 igb_disable_pcie_master(struct e1000_hw *hw);
+-s32 igb_force_mac_fc(struct e1000_hw *hw);
+-s32 igb_get_auto_rd_done(struct e1000_hw *hw);
+-s32 igb_get_bus_info_pcie(struct e1000_hw *hw);
+-s32 igb_get_hw_semaphore(struct e1000_hw *hw);
+-s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+- u16 *duplex);
+-s32 igb_id_led_init(struct e1000_hw *hw);
+-s32 igb_led_off(struct e1000_hw *hw);
+-void igb_update_mc_addr_list(struct e1000_hw *hw,
+- u8 *mc_addr_list, u32 mc_addr_count);
+-s32 igb_setup_link(struct e1000_hw *hw);
+-s32 igb_validate_mdi_setting(struct e1000_hw *hw);
+-s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+- u32 offset, u8 data);
+-
+-void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
+-void igb_clear_vfta(struct e1000_hw *hw);
+-void igb_clear_vfta_i350(struct e1000_hw *hw);
+-s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add);
+-void igb_config_collision_dist(struct e1000_hw *hw);
+-void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+-void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
+-void igb_put_hw_semaphore(struct e1000_hw *hw);
+-void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+-s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
+-
+-bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
+-
+-enum e1000_mng_mode {
+- e1000_mng_mode_none = 0,
+- e1000_mng_mode_asf,
+- e1000_mng_mode_pt,
+- e1000_mng_mode_ipmi,
+- e1000_mng_mode_host_if_only
+-};
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
+
+-#define E1000_FACTPS_MNGCG 0x20000000
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+-#define E1000_FWSM_MODE_MASK 0xE
+-#define E1000_FWSM_MODE_SHIFT 1
++*******************************************************************************/
+
+-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
++#ifndef _E1000_MAC_H_
++#define _E1000_MAC_H_
+
+-void e1000_init_function_pointers_82575(struct e1000_hw *hw);
++void e1000_init_mac_ops_generic(struct e1000_hw *hw);
++#ifndef E1000_REMOVED
++#define E1000_REMOVED(a) (0)
++#endif /* E1000_REMOVED */
++void e1000_null_mac_generic(struct e1000_hw *hw);
++s32 e1000_null_ops_generic(struct e1000_hw *hw);
++s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
++bool e1000_null_mng_mode(struct e1000_hw *hw);
++void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
++void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
++int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
++s32 e1000_blink_led_generic(struct e1000_hw *hw);
++s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw);
++s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
++s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
++s32 e1000_cleanup_led_generic(struct e1000_hw *hw);
++s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
++s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw);
++s32 e1000_force_mac_fc_generic(struct e1000_hw *hw);
++s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
++s32 igb_e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
++void igb_e1000_set_lan_id_single_port(struct e1000_hw *hw);
++s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
++s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
++ u16 *duplex);
++s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
++ u16 *speed, u16 *duplex);
++s32 e1000_id_led_init_generic(struct e1000_hw *hw);
++s32 e1000_led_on_generic(struct e1000_hw *hw);
++s32 e1000_led_off_generic(struct e1000_hw *hw);
++void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
++ u8 *mc_addr_list, u32 mc_addr_count);
++s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
++s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
++s32 e1000_setup_led_generic(struct e1000_hw *hw);
++s32 e1000_setup_link_generic(struct e1000_hw *hw);
++s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
++s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
++ u32 offset, u8 data);
++
++u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
++
++void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
++void igb_e1000_clear_vfta_generic(struct e1000_hw *hw);
++void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
++void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
++void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
++s32 igb_e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
++void e1000_reset_adaptive_generic(struct e1000_hw *hw);
++void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
++void e1000_update_adaptive_generic(struct e1000_hw *hw);
++void igb_e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+ #endif
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_manage.c b/drivers/net/ethernet/intel/igb/e1000_manage.c
+--- a/drivers/net/ethernet/intel/igb/e1000_manage.c 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_manage.c 2016-11-14 14:32:08.579567168 +0000
+@@ -0,0 +1,552 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include "e1000_api.h"
++/**
++ * e1000_calculate_checksum - Calculate checksum for buffer
++ * @buffer: pointer to EEPROM
++ * @length: size of EEPROM to calculate a checksum for
++ *
++ * Calculates the checksum for some buffer on a specified length. The
++ * checksum calculated is returned.
++ **/
++u8 e1000_calculate_checksum(u8 *buffer, u32 length)
++{
++ u32 i;
++ u8 sum = 0;
++
++ DEBUGFUNC("e1000_calculate_checksum");
++
++ if (!buffer)
++ return 0;
++
++ for (i = 0; i < length; i++)
++ sum += buffer[i];
++
++ return (u8) (0 - sum);
++}
++
++/**
++ * e1000_mng_enable_host_if_generic - Checks host interface is enabled
++ * @hw: pointer to the HW structure
++ *
++ * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
++ *
++ * This function checks whether the HOST IF is enabled for command operation
++ * and also checks whether the previous command is completed. It busy waits
++ * in case of previous command is not completed.
++ **/
++s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
++{
++ u32 hicr;
++ u8 i;
++
++ DEBUGFUNC("e1000_mng_enable_host_if_generic");
++
++ if (!hw->mac.arc_subsystem_valid) {
++ DEBUGOUT("ARC subsystem not valid.\n");
++ return -E1000_ERR_HOST_INTERFACE_COMMAND;
++ }
++
++ /* Check that the host interface is enabled. */
++ hicr = E1000_READ_REG(hw, E1000_HICR);
++ if (!(hicr & E1000_HICR_EN)) {
++ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
++ return -E1000_ERR_HOST_INTERFACE_COMMAND;
++ }
++ /* check the previous command is completed */
++ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
++ hicr = E1000_READ_REG(hw, E1000_HICR);
++ if (!(hicr & E1000_HICR_C))
++ break;
++ msec_delay_irq(1);
++ }
++
++ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
++ DEBUGOUT("Previous command timeout failed .\n");
++ return -E1000_ERR_HOST_INTERFACE_COMMAND;
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_check_mng_mode_generic - Generic check management mode
++ * @hw: pointer to the HW structure
++ *
++ * Reads the firmware semaphore register and returns true (>0) if
++ * manageability is enabled, else false (0).
++ **/
++bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
++{
++ u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
++
++ DEBUGFUNC("e1000_check_mng_mode_generic");
++
++
++ return (fwsm & E1000_FWSM_MODE_MASK) ==
++ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
++}
++
++/**
++ * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx
++ * @hw: pointer to the HW structure
++ *
++ * Enables packet filtering on transmit packets if manageability is enabled
++ * and host interface is enabled.
++ **/
++bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
++{
++ struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
++ u32 *buffer = (u32 *)&hw->mng_cookie;
++ u32 offset;
++ s32 ret_val, hdr_csum, csum;
++ u8 i, len;
++
++ DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
++
++ hw->mac.tx_pkt_filtering = true;
++
++ /* No manageability, no filtering */
++ if (!hw->mac.ops.check_mng_mode(hw)) {
++ hw->mac.tx_pkt_filtering = false;
++ return hw->mac.tx_pkt_filtering;
++ }
++
++ /* If we can't read from the host interface for whatever
++ * reason, disable filtering.
++ */
++ ret_val = e1000_mng_enable_host_if_generic(hw);
++ if (ret_val != E1000_SUCCESS) {
++ hw->mac.tx_pkt_filtering = false;
++ return hw->mac.tx_pkt_filtering;
++ }
++
++ /* Read in the header. Length and offset are in dwords. */
++ len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
++ offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
++ for (i = 0; i < len; i++)
++ *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
++ offset + i);
++ hdr_csum = hdr->checksum;
++ hdr->checksum = 0;
++ csum = e1000_calculate_checksum((u8 *)hdr,
++ E1000_MNG_DHCP_COOKIE_LENGTH);
++ /* If either the checksums or signature don't match, then
++ * the cookie area isn't considered valid, in which case we
++ * take the safe route of assuming Tx filtering is enabled.
++ */
++ if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
++ hw->mac.tx_pkt_filtering = true;
++ return hw->mac.tx_pkt_filtering;
++ }
++
++ /* Cookie area is valid, make the final check for filtering. */
++ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
++ hw->mac.tx_pkt_filtering = false;
++
++ return hw->mac.tx_pkt_filtering;
++}
++
++/**
++ * e1000_mng_write_cmd_header_generic - Writes manageability command header
++ * @hw: pointer to the HW structure
++ * @hdr: pointer to the host interface command header
++ *
++ * Writes the command header after performing the checksum calculation.
++ **/
++s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
++ struct e1000_host_mng_command_header *hdr)
++{
++ u16 i, length = sizeof(struct e1000_host_mng_command_header);
++
++ DEBUGFUNC("e1000_mng_write_cmd_header_generic");
++
++ /* Write the whole command header structure with new checksum. */
++
++ hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
++
++ length >>= 2;
++ /* Write the relevant command block into the ram area. */
++ for (i = 0; i < length; i++) {
++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
++ *((u32 *) hdr + i));
++ E1000_WRITE_FLUSH(hw);
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_mng_host_if_write_generic - Write to the manageability host interface
++ * @hw: pointer to the HW structure
++ * @buffer: pointer to the host interface buffer
++ * @length: size of the buffer
++ * @offset: location in the buffer to write to
++ * @sum: sum of the data (not checksum)
++ *
++ * This function writes the buffer content at the offset given on the host if.
++ * It also does alignment considerations to do the writes in most efficient
++ * way. Also fills up the sum of the buffer in *buffer parameter.
++ **/
++s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
++ u16 length, u16 offset, u8 *sum)
++{
++ u8 *tmp;
++ u8 *bufptr = buffer;
++ u32 data = 0;
++ u16 remaining, i, j, prev_bytes;
++
++ DEBUGFUNC("e1000_mng_host_if_write_generic");
++
++ /* sum = only sum of the data and it is not checksum */
++
++ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
++ return -E1000_ERR_PARAM;
++
++ tmp = (u8 *)&data;
++ prev_bytes = offset & 0x3;
++ offset >>= 2;
++
++ if (prev_bytes) {
++ data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
++ for (j = prev_bytes; j < sizeof(u32); j++) {
++ *(tmp + j) = *bufptr++;
++ *sum += *(tmp + j);
++ }
++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
++ length -= j - prev_bytes;
++ offset++;
++ }
++
++ remaining = length & 0x3;
++ length -= remaining;
++
++ /* Calculate length in DWORDs */
++ length >>= 2;
++
++ /* The device driver writes the relevant command block into the
++ * ram area.
++ */
++ for (i = 0; i < length; i++) {
++ for (j = 0; j < sizeof(u32); j++) {
++ *(tmp + j) = *bufptr++;
++ *sum += *(tmp + j);
++ }
++
++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
++ data);
++ }
++ if (remaining) {
++ for (j = 0; j < sizeof(u32); j++) {
++ if (j < remaining)
++ *(tmp + j) = *bufptr++;
++ else
++ *(tmp + j) = 0;
++
++ *sum += *(tmp + j);
++ }
++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
++ data);
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
++ * @hw: pointer to the HW structure
++ * @buffer: pointer to the host interface
++ * @length: size of the buffer
++ *
++ * Writes the DHCP information to the host interface.
++ **/
++s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
++ u16 length)
++{
++ struct e1000_host_mng_command_header hdr;
++ s32 ret_val;
++ u32 hicr;
++
++ DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
++
++ hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
++ hdr.command_length = length;
++ hdr.reserved1 = 0;
++ hdr.reserved2 = 0;
++ hdr.checksum = 0;
++
++ /* Enable the host interface */
++ ret_val = e1000_mng_enable_host_if_generic(hw);
++ if (ret_val)
++ return ret_val;
++
++ /* Populate the host interface with the contents of "buffer". */
++ ret_val = e1000_mng_host_if_write_generic(hw, buffer, length,
++ sizeof(hdr), &(hdr.checksum));
++ if (ret_val)
++ return ret_val;
++
++ /* Write the manageability command header */
++ ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr);
++ if (ret_val)
++ return ret_val;
++
++ /* Tell the ARC a new command is pending. */
++ hicr = E1000_READ_REG(hw, E1000_HICR);
++ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * igb_e1000_enable_mng_pass_thru - Check if management passthrough is needed
++ * @hw: pointer to the HW structure
++ *
++ * Verifies the hardware needs to leave interface enabled so that frames can
++ * be directed to and from the management interface.
++ **/
++/* Changed name, duplicated with e1000 */
++bool igb_e1000_enable_mng_pass_thru(struct e1000_hw *hw)
++{
++ u32 manc;
++ u32 fwsm, factps;
++
++ DEBUGFUNC("igb_e1000_enable_mng_pass_thru");
++
++ if (!hw->mac.asf_firmware_present)
++ return false;
++
++ manc = E1000_READ_REG(hw, E1000_MANC);
++
++ if (!(manc & E1000_MANC_RCV_TCO_EN))
++ return false;
++
++ if (hw->mac.has_fwsm) {
++ fwsm = E1000_READ_REG(hw, E1000_FWSM);
++ factps = E1000_READ_REG(hw, E1000_FACTPS);
++
++ if (!(factps & E1000_FACTPS_MNGCG) &&
++ ((fwsm & E1000_FWSM_MODE_MASK) ==
++ (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)))
++ return true;
++ } else if ((manc & E1000_MANC_SMBUS_EN) &&
++ !(manc & E1000_MANC_ASF_EN)) {
++ return true;
++ }
++
++ return false;
++}
++
++/**
++ * e1000_host_interface_command - Writes buffer to host interface
++ * @hw: pointer to the HW structure
++ * @buffer: contains a command to write
++ * @length: the byte length of the buffer, must be multiple of 4 bytes
++ *
++ * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS
++ * else returns E1000_ERR_HOST_INTERFACE_COMMAND.
++ **/
++s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length)
++{
++ u32 hicr, i;
++
++ DEBUGFUNC("e1000_host_interface_command");
++
++ if (!(hw->mac.arc_subsystem_valid)) {
++ DEBUGOUT("Hardware doesn't support host interface command.\n");
++ return E1000_SUCCESS;
++ }
++
++ if (!hw->mac.asf_firmware_present) {
++ DEBUGOUT("Firmware is not present.\n");
++ return E1000_SUCCESS;
++ }
++
++ if (length == 0 || length & 0x3 ||
++ length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) {
++ DEBUGOUT("Buffer length failure.\n");
++ return -E1000_ERR_HOST_INTERFACE_COMMAND;
++ }
++
++ /* Check that the host interface is enabled. */
++ hicr = E1000_READ_REG(hw, E1000_HICR);
++ if (!(hicr & E1000_HICR_EN)) {
++ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
++ return -E1000_ERR_HOST_INTERFACE_COMMAND;
++ }
++
++ /* Calculate length in DWORDs */
++ length >>= 2;
++
++ /* The device driver writes the relevant command block
++ * into the ram area.
++ */
++ for (i = 0; i < length; i++)
++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
++ *((u32 *)buffer + i));
++
++ /* Setting this bit tells the ARC that a new command is pending. */
++ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
++
++ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
++ hicr = E1000_READ_REG(hw, E1000_HICR);
++ if (!(hicr & E1000_HICR_C))
++ break;
++ msec_delay(1);
++ }
++
++ /* Check command successful completion. */
++ if (i == E1000_HI_COMMAND_TIMEOUT ||
++ (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) {
++ DEBUGOUT("Command has failed with no status valid.\n");
++ return -E1000_ERR_HOST_INTERFACE_COMMAND;
++ }
++
++ for (i = 0; i < length; i++)
++ *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
++ E1000_HOST_IF,
++ i);
++
++ return E1000_SUCCESS;
++}
++/**
++ * e1000_load_firmware - Writes proxy FW code buffer to host interface
++ * and executes it.
++ * @hw: pointer to the HW structure
++ * @buffer: contains a firmware to write
++ * @length: the byte length of the buffer, must be multiple of 4 bytes
++ *
++ * Upon success returns E1000_SUCCESS, returns E1000_ERR_CONFIG if not enabled
++ * in HW else returns E1000_ERR_HOST_INTERFACE_COMMAND.
++ **/
++s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length)
++{
++ u32 hicr, hibba, fwsm, icr, i;
++
++ DEBUGFUNC("e1000_load_firmware");
++
++ if (hw->mac.type < e1000_i210) {
++ DEBUGOUT("Hardware doesn't support loading FW by the driver\n");
++ return -E1000_ERR_CONFIG;
++ }
++
++ /* Check that the host interface is enabled. */
++ hicr = E1000_READ_REG(hw, E1000_HICR);
++ if (!(hicr & E1000_HICR_EN)) {
++ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
++ return -E1000_ERR_CONFIG;
++ }
++ if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) {
++ DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n");
++ return -E1000_ERR_CONFIG;
++ }
++
++ if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) {
++ DEBUGOUT("Buffer length failure.\n");
++ return -E1000_ERR_INVALID_ARGUMENT;
++ }
++
++ /* Clear notification from ROM-FW by reading ICR register */
++ icr = E1000_READ_REG(hw, E1000_ICR_V2);
++
++ /* Reset ROM-FW */
++ hicr = E1000_READ_REG(hw, E1000_HICR);
++ hicr |= E1000_HICR_FW_RESET_ENABLE;
++ E1000_WRITE_REG(hw, E1000_HICR, hicr);
++ hicr |= E1000_HICR_FW_RESET;
++ E1000_WRITE_REG(hw, E1000_HICR, hicr);
++ E1000_WRITE_FLUSH(hw);
++
++ /* Wait till MAC notifies about its readiness after ROM-FW reset */
++ for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) {
++ icr = E1000_READ_REG(hw, E1000_ICR_V2);
++ if (icr & E1000_ICR_MNG)
++ break;
++ msec_delay(1);
++ }
++
++ /* Check for timeout */
++ if (i == E1000_HI_COMMAND_TIMEOUT) {
++ DEBUGOUT("FW reset failed.\n");
++ return -E1000_ERR_HOST_INTERFACE_COMMAND;
++ }
++
++ /* Wait till MAC is ready to accept new FW code */
++ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
++ fwsm = E1000_READ_REG(hw, E1000_FWSM);
++ if ((fwsm & E1000_FWSM_FW_VALID) &&
++ ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT ==
++ E1000_FWSM_HI_EN_ONLY_MODE))
++ break;
++ msec_delay(1);
++ }
++
++ /* Check for timeout */
++ if (i == E1000_HI_COMMAND_TIMEOUT) {
++ DEBUGOUT("FW reset failed.\n");
++ return -E1000_ERR_HOST_INTERFACE_COMMAND;
++ }
++
++ /* Calculate length in DWORDs */
++ length >>= 2;
++
++ /* The device driver writes the relevant FW code block
++ * into the ram area in DWORDs via 1kB ram addressing window.
++ */
++ for (i = 0; i < length; i++) {
++ if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) {
++ /* Point to correct 1kB ram window */
++ hibba = E1000_HI_FW_BASE_ADDRESS +
++ ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) *
++ (i / E1000_HI_FW_BLOCK_DWORD_LENGTH));
++
++ E1000_WRITE_REG(hw, E1000_HIBBA, hibba);
++ }
++
++ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
++ i % E1000_HI_FW_BLOCK_DWORD_LENGTH,
++ *((u32 *)buffer + i));
++ }
++
++ /* Setting this bit tells the ARC that a new FW is ready to execute. */
++ hicr = E1000_READ_REG(hw, E1000_HICR);
++ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
++
++ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
++ hicr = E1000_READ_REG(hw, E1000_HICR);
++ if (!(hicr & E1000_HICR_C))
++ break;
++ msec_delay(1);
++ }
++
++ /* Check for successful FW start. */
++ if (i == E1000_HI_COMMAND_TIMEOUT) {
++ DEBUGOUT("New FW did not start within timeout period.\n");
++ return -E1000_ERR_HOST_INTERFACE_COMMAND;
++ }
++
++ return E1000_SUCCESS;
++}
++
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_manage.h b/drivers/net/ethernet/intel/igb/e1000_manage.h
+--- a/drivers/net/ethernet/intel/igb/e1000_manage.h 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_manage.h 2016-11-14 14:32:08.579567168 +0000
+@@ -0,0 +1,86 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _E1000_MANAGE_H_
++#define _E1000_MANAGE_H_
++
++bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
++bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
++s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
++s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
++ u16 length, u16 offset, u8 *sum);
++s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
++ struct e1000_host_mng_command_header *hdr);
++s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
++ u8 *buffer, u16 length);
++bool igb_e1000_enable_mng_pass_thru(struct e1000_hw *hw);
++u8 e1000_calculate_checksum(u8 *buffer, u32 length);
++s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length);
++s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length);
++
++enum e1000_mng_mode {
++ e1000_mng_mode_none = 0,
++ e1000_mng_mode_asf,
++ e1000_mng_mode_pt,
++ e1000_mng_mode_ipmi,
++ e1000_mng_mode_host_if_only
++};
++
++#define E1000_FACTPS_MNGCG 0x20000000
++
++#define E1000_FWSM_MODE_MASK 0xE
++#define E1000_FWSM_MODE_SHIFT 1
++#define E1000_FWSM_FW_VALID 0x00008000
++#define E1000_FWSM_HI_EN_ONLY_MODE 0x4
++
++#define E1000_MNG_IAMT_MODE 0x3
++#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
++#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
++#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
++#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
++#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
++#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
++
++#define E1000_VFTA_ENTRY_SHIFT 5
++#define E1000_VFTA_ENTRY_MASK 0x7F
++#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
++
++#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
++#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
++#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */
++#define E1000_HI_FW_BASE_ADDRESS 0x10000
++#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */
++#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */
++#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */
++#define E1000_HICR_EN 0x01 /* Enable bit - RO */
++/* Driver sets this bit when done to put command in RAM */
++#define E1000_HICR_C 0x02
++#define E1000_HICR_SV 0x04 /* Status Validity */
++#define E1000_HICR_FW_RESET_ENABLE 0x40
++#define E1000_HICR_FW_RESET 0x80
++
++/* Intel(R) Active Management Technology signature */
++#define E1000_IAMT_SIGNATURE 0x544D4149
++
++#endif
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
+--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c 2016-11-14 14:32:08.579567168 +0000
+@@ -1,42 +1,71 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
+
+ #include "e1000_mbx.h"
+
+ /**
+- * igb_read_mbx - Reads a message from the mailbox
++ * e1000_null_mbx_check_for_flag - No-op function, return 0
++ * @hw: pointer to the HW structure
++ **/
++static s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw,
++ u16 E1000_UNUSEDARG mbx_id)
++{
++ DEBUGFUNC("e1000_null_mbx_check_flag");
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_null_mbx_transact - No-op function, return 0
++ * @hw: pointer to the HW structure
++ **/
++static s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw,
++ u32 E1000_UNUSEDARG *msg,
++ u16 E1000_UNUSEDARG size,
++ u16 E1000_UNUSEDARG mbx_id)
++{
++ DEBUGFUNC("e1000_null_mbx_rw_msg");
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+- * returns SUCCESS if it successfully read message from buffer
++ * returns SUCCESS if it successfully read message from buffer
+ **/
+-s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
++s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+ {
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
++ DEBUGFUNC("e1000_read_mbx");
++
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+@@ -48,7 +77,7 @@
+ }
+
+ /**
+- * igb_write_mbx - Write a message to the mailbox
++ * e1000_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+@@ -56,10 +85,12 @@
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+-s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
++s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+ {
+ struct e1000_mbx_info *mbx = &hw->mbx;
+- s32 ret_val = 0;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_write_mbx");
+
+ if (size > mbx->size)
+ ret_val = -E1000_ERR_MBX;
+@@ -71,17 +102,19 @@
+ }
+
+ /**
+- * igb_check_for_msg - checks to see if someone sent us mail
++ * e1000_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+-s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
++s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
+ {
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
++ DEBUGFUNC("e1000_check_for_msg");
++
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+@@ -89,17 +122,19 @@
+ }
+
+ /**
+- * igb_check_for_ack - checks to see if someone sent us ACK
++ * e1000_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+-s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
++s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
+ {
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
++ DEBUGFUNC("e1000_check_for_ack");
++
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+@@ -107,17 +142,19 @@
+ }
+
+ /**
+- * igb_check_for_rst - checks to see if other side has reset
++ * e1000_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+-s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
++s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
+ {
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
++ DEBUGFUNC("e1000_check_for_rst");
++
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+@@ -125,17 +162,19 @@
+ }
+
+ /**
+- * igb_poll_for_msg - Wait for message notification
++ * e1000_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+-static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
++static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
+ {
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
++ DEBUGFUNC("e1000_poll_for_msg");
++
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+@@ -143,28 +182,30 @@
+ countdown--;
+ if (!countdown)
+ break;
+- udelay(mbx->usec_delay);
++ usec_delay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+ out:
+- return countdown ? 0 : -E1000_ERR_MBX;
++ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+ }
+
+ /**
+- * igb_poll_for_ack - Wait for message acknowledgement
++ * e1000_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+-static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
++static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
+ {
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
++ DEBUGFUNC("e1000_poll_for_ack");
++
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+@@ -172,18 +213,18 @@
+ countdown--;
+ if (!countdown)
+ break;
+- udelay(mbx->usec_delay);
++ usec_delay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+ out:
+- return countdown ? 0 : -E1000_ERR_MBX;
++ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+ }
+
+ /**
+- * igb_read_posted_mbx - Wait for message notification and receive message
++ * e1000_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+@@ -192,17 +233,19 @@
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+-static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
+- u16 mbx_id)
++s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+ {
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
++ DEBUGFUNC("e1000_read_posted_mbx");
++
+ if (!mbx->ops.read)
+ goto out;
+
+- ret_val = igb_poll_for_msg(hw, mbx_id);
++ ret_val = e1000_poll_for_msg(hw, mbx_id);
+
++ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+ out:
+@@ -210,7 +253,7 @@
+ }
+
+ /**
+- * igb_write_posted_mbx - Write a message to the mailbox, wait for ack
++ * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+@@ -219,12 +262,13 @@
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+-static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
+- u16 mbx_id)
++s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+ {
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
++ DEBUGFUNC("e1000_write_posted_mbx");
++
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+@@ -234,37 +278,58 @@
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+- ret_val = igb_poll_for_ack(hw, mbx_id);
++ ret_val = e1000_poll_for_ack(hw, mbx_id);
+ out:
+ return ret_val;
+ }
+
+-static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
++/**
++ * e1000_init_mbx_ops_generic - Initialize mbx function pointers
++ * @hw: pointer to the HW structure
++ *
++ * Sets the function pointers to no-op functions
++ **/
++void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
+ {
+- u32 mbvficr = rd32(E1000_MBVFICR);
++ struct e1000_mbx_info *mbx = &hw->mbx;
++ mbx->ops.init_params = e1000_null_ops_generic;
++ mbx->ops.read = e1000_null_mbx_transact;
++ mbx->ops.write = e1000_null_mbx_transact;
++ mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag;
++ mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag;
++ mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag;
++ mbx->ops.read_posted = e1000_read_posted_mbx;
++ mbx->ops.write_posted = e1000_write_posted_mbx;
++}
++
++static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
++{
++ u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (mbvficr & mask) {
+- ret_val = 0;
+- wr32(E1000_MBVFICR, mask);
++ ret_val = E1000_SUCCESS;
++ E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
+ }
+
+ return ret_val;
+ }
+
+ /**
+- * igb_check_for_msg_pf - checks to see if the VF has sent mail
++ * e1000_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+-static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
++static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
+ {
+ s32 ret_val = -E1000_ERR_MBX;
+
+- if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
+- ret_val = 0;
++ DEBUGFUNC("e1000_check_for_msg_pf");
++
++ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
++ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+@@ -272,18 +337,20 @@
+ }
+
+ /**
+- * igb_check_for_ack_pf - checks to see if the VF has ACKed
++ * e1000_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+-static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
++static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
+ {
+ s32 ret_val = -E1000_ERR_MBX;
+
+- if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
+- ret_val = 0;
++ DEBUGFUNC("e1000_check_for_ack_pf");
++
++ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
++ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+@@ -291,20 +358,22 @@
+ }
+
+ /**
+- * igb_check_for_rst_pf - checks to see if the VF has reset
++ * e1000_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+-static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
++static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
+ {
+- u32 vflre = rd32(E1000_VFLRE);
++ u32 vflre = E1000_READ_REG(hw, E1000_VFLRE);
+ s32 ret_val = -E1000_ERR_MBX;
+
++ DEBUGFUNC("e1000_check_for_rst_pf");
++
+ if (vflre & (1 << vf_number)) {
+- ret_val = 0;
+- wr32(E1000_VFLRE, (1 << vf_number));
++ ret_val = E1000_SUCCESS;
++ E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
+ hw->mbx.stats.rsts++;
+ }
+
+@@ -312,30 +381,40 @@
+ }
+
+ /**
+- * igb_obtain_mbx_lock_pf - obtain mailbox lock
++ * e1000_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+-static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
++static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
+ {
+ s32 ret_val = -E1000_ERR_MBX;
+ u32 p2v_mailbox;
++ int count = 10;
+
+- /* Take ownership of the buffer */
+- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
++ DEBUGFUNC("e1000_obtain_mbx_lock_pf");
+
+- /* reserve mailbox for vf use */
+- p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
+- if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
+- ret_val = 0;
++ do {
++ /* Take ownership of the buffer */
++ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number),
++ E1000_P2VMAILBOX_PFU);
++
++ /* reserve mailbox for pf use */
++ p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
++ if (p2v_mailbox & E1000_P2VMAILBOX_PFU) {
++ ret_val = E1000_SUCCESS;
++ break;
++ }
++ usec_delay(1000);
++ } while (count-- > 0);
+
+ return ret_val;
++
+ }
+
+ /**
+- * igb_write_mbx_pf - Places a message in the mailbox
++ * e1000_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+@@ -343,27 +422,29 @@
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+-static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+- u16 vf_number)
++static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
++ u16 vf_number)
+ {
+ s32 ret_val;
+ u16 i;
+
++ DEBUGFUNC("e1000_write_mbx_pf");
++
+ /* lock the mailbox to prevent pf/vf race condition */
+- ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
++ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+- igb_check_for_msg_pf(hw, vf_number);
+- igb_check_for_ack_pf(hw, vf_number);
++ e1000_check_for_msg_pf(hw, vf_number);
++ e1000_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+- array_wr32(E1000_VMBMEM(vf_number), i, msg[i]);
++ E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer*/
+- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
++ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+@@ -374,7 +455,7 @@
+ }
+
+ /**
+- * igb_read_mbx_pf - Read a message from the mailbox
++ * e1000_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+@@ -384,23 +465,25 @@
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+-static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+- u16 vf_number)
++static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
++ u16 vf_number)
+ {
+ s32 ret_val;
+ u16 i;
+
++ DEBUGFUNC("e1000_read_mbx_pf");
++
+ /* lock the mailbox to prevent pf/vf race condition */
+- ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
++ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+- msg[i] = array_rd32(E1000_VMBMEM(vf_number), i);
++ msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
++ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+@@ -415,29 +498,34 @@
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+-s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
++s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
+ {
+ struct e1000_mbx_info *mbx = &hw->mbx;
+
+- mbx->timeout = 0;
+- mbx->usec_delay = 0;
+-
+- mbx->size = E1000_VFMAILBOX_SIZE;
+-
+- mbx->ops.read = igb_read_mbx_pf;
+- mbx->ops.write = igb_write_mbx_pf;
+- mbx->ops.read_posted = igb_read_posted_mbx;
+- mbx->ops.write_posted = igb_write_posted_mbx;
+- mbx->ops.check_for_msg = igb_check_for_msg_pf;
+- mbx->ops.check_for_ack = igb_check_for_ack_pf;
+- mbx->ops.check_for_rst = igb_check_for_rst_pf;
++ switch (hw->mac.type) {
++ case e1000_82576:
++ case e1000_i350:
++ case e1000_i354:
++ mbx->timeout = 0;
++ mbx->usec_delay = 0;
+
+- mbx->stats.msgs_tx = 0;
+- mbx->stats.msgs_rx = 0;
+- mbx->stats.reqs = 0;
+- mbx->stats.acks = 0;
+- mbx->stats.rsts = 0;
++ mbx->size = E1000_VFMAILBOX_SIZE;
+
+- return 0;
++ mbx->ops.read = e1000_read_mbx_pf;
++ mbx->ops.write = e1000_write_mbx_pf;
++ mbx->ops.read_posted = e1000_read_posted_mbx;
++ mbx->ops.write_posted = e1000_write_posted_mbx;
++ mbx->ops.check_for_msg = e1000_check_for_msg_pf;
++ mbx->ops.check_for_ack = e1000_check_for_ack_pf;
++ mbx->ops.check_for_rst = e1000_check_for_rst_pf;
++
++ mbx->stats.msgs_tx = 0;
++ mbx->stats.msgs_rx = 0;
++ mbx->stats.reqs = 0;
++ mbx->stats.acks = 0;
++ mbx->stats.rsts = 0;
++ default:
++ return E1000_SUCCESS;
++ }
+ }
+
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
+--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h 2016-11-14 14:32:08.579567168 +0000
+@@ -1,30 +1,31 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
+
+ #ifndef _E1000_MBX_H_
+ #define _E1000_MBX_H_
+
+-#include "e1000_hw.h"
++#include "e1000_api.h"
+
+ #define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+ #define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+@@ -32,10 +33,10 @@
+ #define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+ #define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+-#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
+-#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+-#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
+-#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
++#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
++#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
++#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
++#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+ #define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+
+@@ -43,31 +44,41 @@
+ * PF. The reverse is true if it is E1000_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+-/* Messages below or'd with this are the ACK */
++/* Msgs below or'd with this are the ACK */
+ #define E1000_VT_MSGTYPE_ACK 0x80000000
+-/* Messages below or'd with this are the NACK */
++/* Msgs below or'd with this are the NACK */
+ #define E1000_VT_MSGTYPE_NACK 0x40000000
+ /* Indicates that VF is still clear to send requests */
+ #define E1000_VT_MSGTYPE_CTS 0x20000000
+ #define E1000_VT_MSGINFO_SHIFT 16
+-/* bits 23:16 are used for exra info for certain messages */
++/* bits 23:16 are used for extra info for certain messages */
+ #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
+
+-#define E1000_VF_RESET 0x01 /* VF requests reset */
+-#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
+-#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
+-#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
+-#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
+-#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
++#define E1000_VF_RESET 0x01 /* VF requests reset */
++#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
++#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
++#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT)
++#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT)
++#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
++#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT)
++#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */
++#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/
++#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT)
+ #define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
+
+-#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
++#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
++
++#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
++#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+-s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
+-s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
+-s32 igb_check_for_msg(struct e1000_hw *, u16);
+-s32 igb_check_for_ack(struct e1000_hw *, u16);
+-s32 igb_check_for_rst(struct e1000_hw *, u16);
+-s32 igb_init_mbx_params_pf(struct e1000_hw *);
++s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16);
++s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16);
++s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
++s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
++s32 e1000_check_for_msg(struct e1000_hw *, u16);
++s32 e1000_check_for_ack(struct e1000_hw *, u16);
++s32 e1000_check_for_rst(struct e1000_hw *, u16);
++void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
++s32 e1000_init_mbx_params_pf(struct e1000_hw *);
+
+ #endif /* _E1000_MBX_H_ */
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
+--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c 2016-11-14 14:32:08.579567168 +0000
+@@ -1,63 +1,131 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include "e1000_api.h"
++
++static void e1000_reload_nvm_generic(struct e1000_hw *hw);
++
++/**
++ * e1000_init_nvm_ops_generic - Initialize NVM function pointers
++ * @hw: pointer to the HW structure
+ *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++ * Setups up the function pointers to no-op functions
++ **/
++void e1000_init_nvm_ops_generic(struct e1000_hw *hw)
++{
++ struct e1000_nvm_info *nvm = &hw->nvm;
++ DEBUGFUNC("e1000_init_nvm_ops_generic");
++
++ /* Initialize function pointers */
++ nvm->ops.init_params = e1000_null_ops_generic;
++ nvm->ops.acquire = e1000_null_ops_generic;
++ nvm->ops.read = e1000_null_read_nvm;
++ nvm->ops.release = e1000_null_nvm_generic;
++ nvm->ops.reload = e1000_reload_nvm_generic;
++ nvm->ops.update = e1000_null_ops_generic;
++ nvm->ops.valid_led_default = e1000_null_led_default;
++ nvm->ops.validate = e1000_null_ops_generic;
++ nvm->ops.write = e1000_null_write_nvm;
++}
+
+-#include
+-#include
++/**
++ * e1000_null_nvm_read - No-op function, return 0
++ * @hw: pointer to the HW structure
++ **/
++s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
++ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
++ u16 E1000_UNUSEDARG *c)
++{
++ DEBUGFUNC("e1000_null_read_nvm");
++ return E1000_SUCCESS;
++}
+
+-#include "e1000_mac.h"
+-#include "e1000_nvm.h"
++/**
++ * e1000_null_nvm_generic - No-op function, return void
++ * @hw: pointer to the HW structure
++ **/
++void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw)
++{
++ DEBUGFUNC("e1000_null_nvm_generic");
++ return;
++}
+
+ /**
+- * igb_raise_eec_clk - Raise EEPROM clock
++ * e1000_null_led_default - No-op function, return 0
++ * @hw: pointer to the HW structure
++ **/
++s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw,
++ u16 E1000_UNUSEDARG *data)
++{
++ DEBUGFUNC("e1000_null_led_default");
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_null_write_nvm - No-op function, return 0
++ * @hw: pointer to the HW structure
++ **/
++s32 e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
++ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
++ u16 E1000_UNUSEDARG *c)
++{
++ DEBUGFUNC("e1000_null_write_nvm");
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+-static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
++static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+ {
+ *eecd = *eecd | E1000_EECD_SK;
+- wr32(E1000_EECD, *eecd);
+- wrfl();
+- udelay(hw->nvm.delay_usec);
++ E1000_WRITE_REG(hw, E1000_EECD, *eecd);
++ E1000_WRITE_FLUSH(hw);
++ usec_delay(hw->nvm.delay_usec);
+ }
+
+ /**
+- * igb_lower_eec_clk - Lower EEPROM clock
++ * e1000_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+-static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
++static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+ {
+ *eecd = *eecd & ~E1000_EECD_SK;
+- wr32(E1000_EECD, *eecd);
+- wrfl();
+- udelay(hw->nvm.delay_usec);
++ E1000_WRITE_REG(hw, E1000_EECD, *eecd);
++ E1000_WRITE_FLUSH(hw);
++ usec_delay(hw->nvm.delay_usec);
+ }
+
+ /**
+- * igb_shift_out_eec_bits - Shift data bits our to the EEPROM
++ * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+@@ -66,12 +134,14 @@
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+-static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
++static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+ {
+ struct e1000_nvm_info *nvm = &hw->nvm;
+- u32 eecd = rd32(E1000_EECD);
++ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u32 mask;
+
++ DEBUGFUNC("e1000_shift_out_eec_bits");
++
+ mask = 0x01 << (count - 1);
+ if (nvm->type == e1000_nvm_eeprom_spi)
+ eecd |= E1000_EECD_DO;
+@@ -82,23 +152,23 @@
+ if (data & mask)
+ eecd |= E1000_EECD_DI;
+
+- wr32(E1000_EECD, eecd);
+- wrfl();
++ E1000_WRITE_REG(hw, E1000_EECD, eecd);
++ E1000_WRITE_FLUSH(hw);
+
+- udelay(nvm->delay_usec);
++ usec_delay(nvm->delay_usec);
+
+- igb_raise_eec_clk(hw, &eecd);
+- igb_lower_eec_clk(hw, &eecd);
++ e1000_raise_eec_clk(hw, &eecd);
++ e1000_lower_eec_clk(hw, &eecd);
+
+ mask >>= 1;
+ } while (mask);
+
+ eecd &= ~E1000_EECD_DI;
+- wr32(E1000_EECD, eecd);
++ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ }
+
+ /**
+- * igb_shift_in_eec_bits - Shift data bits in from the EEPROM
++ * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+@@ -108,121 +178,124 @@
+ * "DO" bit. During this "shifting in" process the data in "DI" bit should
+ * always be clear.
+ **/
+-static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
++static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+ {
+ u32 eecd;
+ u32 i;
+ u16 data;
+
+- eecd = rd32(E1000_EECD);
++ DEBUGFUNC("e1000_shift_in_eec_bits");
++
++ eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+ data = 0;
+
+ for (i = 0; i < count; i++) {
+ data <<= 1;
+- igb_raise_eec_clk(hw, &eecd);
++ e1000_raise_eec_clk(hw, &eecd);
+
+- eecd = rd32(E1000_EECD);
++ eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ eecd &= ~E1000_EECD_DI;
+ if (eecd & E1000_EECD_DO)
+ data |= 1;
+
+- igb_lower_eec_clk(hw, &eecd);
++ e1000_lower_eec_clk(hw, &eecd);
+ }
+
+ return data;
+ }
+
+ /**
+- * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion
++ * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ * @hw: pointer to the HW structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the EEPROM status bit for either read or write completion based
+ * upon the value of 'ee_reg'.
+ **/
+-static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
++s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+ {
+ u32 attempts = 100000;
+ u32 i, reg = 0;
+- s32 ret_val = -E1000_ERR_NVM;
++
++ DEBUGFUNC("e1000_poll_eerd_eewr_done");
+
+ for (i = 0; i < attempts; i++) {
+ if (ee_reg == E1000_NVM_POLL_READ)
+- reg = rd32(E1000_EERD);
++ reg = E1000_READ_REG(hw, E1000_EERD);
+ else
+- reg = rd32(E1000_EEWR);
++ reg = E1000_READ_REG(hw, E1000_EEWR);
+
+- if (reg & E1000_NVM_RW_REG_DONE) {
+- ret_val = 0;
+- break;
+- }
++ if (reg & E1000_NVM_RW_REG_DONE)
++ return E1000_SUCCESS;
+
+- udelay(5);
++ usec_delay(5);
+ }
+
+- return ret_val;
++ return -E1000_ERR_NVM;
+ }
+
+ /**
+- * igb_acquire_nvm - Generic request for access to EEPROM
++ * e1000_acquire_nvm_generic - Generic request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+-s32 igb_acquire_nvm(struct e1000_hw *hw)
++s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
+ {
+- u32 eecd = rd32(E1000_EECD);
++ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+- s32 ret_val = 0;
+
++ DEBUGFUNC("e1000_acquire_nvm_generic");
+
+- wr32(E1000_EECD, eecd | E1000_EECD_REQ);
+- eecd = rd32(E1000_EECD);
++ E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
++ eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ while (timeout) {
+ if (eecd & E1000_EECD_GNT)
+ break;
+- udelay(5);
+- eecd = rd32(E1000_EECD);
++ usec_delay(5);
++ eecd = E1000_READ_REG(hw, E1000_EECD);
+ timeout--;
+ }
+
+ if (!timeout) {
+ eecd &= ~E1000_EECD_REQ;
+- wr32(E1000_EECD, eecd);
+- hw_dbg("Could not acquire NVM grant\n");
+- ret_val = -E1000_ERR_NVM;
++ E1000_WRITE_REG(hw, E1000_EECD, eecd);
++ DEBUGOUT("Could not acquire NVM grant\n");
++ return -E1000_ERR_NVM;
+ }
+
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_standby_nvm - Return EEPROM to standby state
++ * e1000_standby_nvm - Return EEPROM to standby state
+ * @hw: pointer to the HW structure
+ *
+ * Return the EEPROM to a standby state.
+ **/
+-static void igb_standby_nvm(struct e1000_hw *hw)
++static void e1000_standby_nvm(struct e1000_hw *hw)
+ {
+ struct e1000_nvm_info *nvm = &hw->nvm;
+- u32 eecd = rd32(E1000_EECD);
++ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
++
++ DEBUGFUNC("e1000_standby_nvm");
+
+ if (nvm->type == e1000_nvm_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+- wr32(E1000_EECD, eecd);
+- wrfl();
+- udelay(nvm->delay_usec);
++ E1000_WRITE_REG(hw, E1000_EECD, eecd);
++ E1000_WRITE_FLUSH(hw);
++ usec_delay(nvm->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+- wr32(E1000_EECD, eecd);
+- wrfl();
+- udelay(nvm->delay_usec);
++ E1000_WRITE_REG(hw, E1000_EECD, eecd);
++ E1000_WRITE_FLUSH(hw);
++ usec_delay(nvm->delay_usec);
+ }
+ }
+
+@@ -236,53 +309,57 @@
+ {
+ u32 eecd;
+
+- eecd = rd32(E1000_EECD);
++ DEBUGFUNC("e1000_stop_nvm");
++
++ eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+ /* Pull CS high */
+ eecd |= E1000_EECD_CS;
+- igb_lower_eec_clk(hw, &eecd);
++ e1000_lower_eec_clk(hw, &eecd);
+ }
+ }
+
+ /**
+- * igb_release_nvm - Release exclusive access to EEPROM
++ * e1000_release_nvm_generic - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+-void igb_release_nvm(struct e1000_hw *hw)
++void e1000_release_nvm_generic(struct e1000_hw *hw)
+ {
+ u32 eecd;
+
++ DEBUGFUNC("e1000_release_nvm_generic");
++
+ e1000_stop_nvm(hw);
+
+- eecd = rd32(E1000_EECD);
++ eecd = E1000_READ_REG(hw, E1000_EECD);
+ eecd &= ~E1000_EECD_REQ;
+- wr32(E1000_EECD, eecd);
++ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ }
+
+ /**
+- * igb_ready_nvm_eeprom - Prepares EEPROM for read/write
++ * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ * @hw: pointer to the HW structure
+ *
+ * Setups the EEPROM for reading and writing.
+ **/
+-static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
++static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+ {
+ struct e1000_nvm_info *nvm = &hw->nvm;
+- u32 eecd = rd32(E1000_EECD);
+- s32 ret_val = 0;
+- u16 timeout = 0;
++ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ u8 spi_stat_reg;
+
++ DEBUGFUNC("e1000_ready_nvm_eeprom");
+
+ if (nvm->type == e1000_nvm_eeprom_spi) {
++ u16 timeout = NVM_MAX_RETRY_SPI;
++
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+- wr32(E1000_EECD, eecd);
+- wrfl();
+- udelay(1);
+- timeout = NVM_MAX_RETRY_SPI;
++ E1000_WRITE_REG(hw, E1000_EECD, eecd);
++ E1000_WRITE_FLUSH(hw);
++ usec_delay(1);
+
+ /* Read "Status Register" repeatedly until the LSB is cleared.
+ * The EEPROM will signal that the command has been completed
+@@ -290,30 +367,28 @@
+ * not cleared within 'timeout', then error out.
+ */
+ while (timeout) {
+- igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+- hw->nvm.opcode_bits);
+- spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
++ e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
++ hw->nvm.opcode_bits);
++ spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+ if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+ break;
+
+- udelay(5);
+- igb_standby_nvm(hw);
++ usec_delay(5);
++ e1000_standby_nvm(hw);
+ timeout--;
+ }
+
+ if (!timeout) {
+- hw_dbg("SPI NVM Status error\n");
+- ret_val = -E1000_ERR_NVM;
+- goto out;
++ DEBUGOUT("SPI NVM Status error\n");
++ return -E1000_ERR_NVM;
+ }
+ }
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_read_nvm_spi - Read EEPROM's using SPI
++ * e1000_read_nvm_spi - Read EEPROM's using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+@@ -321,7 +396,7 @@
+ *
+ * Reads a 16 bit word from the EEPROM.
+ **/
+-s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
++s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+ {
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i = 0;
+@@ -329,51 +404,51 @@
+ u16 word_in;
+ u8 read_opcode = NVM_READ_OPCODE_SPI;
+
++ DEBUGFUNC("e1000_read_nvm_spi");
++
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+- hw_dbg("nvm parameter(s) out of bounds\n");
+- ret_val = -E1000_ERR_NVM;
+- goto out;
++ DEBUGOUT("nvm parameter(s) out of bounds\n");
++ return -E1000_ERR_NVM;
+ }
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- ret_val = igb_ready_nvm_eeprom(hw);
++ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val)
+ goto release;
+
+- igb_standby_nvm(hw);
++ e1000_standby_nvm(hw);
+
+ if ((nvm->address_bits == 8) && (offset >= 128))
+ read_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+- igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+- igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
++ e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
++ e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+
+ /* Read the data. SPI NVMs increment the address with each byte
+ * read and will roll over if reading beyond the end. This allows
+ * us to read the whole NVM from any offset
+ */
+ for (i = 0; i < words; i++) {
+- word_in = igb_shift_in_eec_bits(hw, 16);
++ word_in = e1000_shift_in_eec_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+
+ release:
+ nvm->ops.release(hw);
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_read_nvm_eerd - Reads EEPROM using EERD register
++ * e1000_read_nvm_eerd - Reads EEPROM using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+@@ -381,41 +456,44 @@
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+-s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
++s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+ {
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, eerd = 0;
+- s32 ret_val = 0;
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_read_nvm_eerd");
+
+ /* A check for invalid values: offset too large, too many words,
+- * and not enough words.
++ * too many words for the offset, and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+- hw_dbg("nvm parameter(s) out of bounds\n");
+- ret_val = -E1000_ERR_NVM;
+- goto out;
++ DEBUGOUT("nvm parameter(s) out of bounds\n");
++ return -E1000_ERR_NVM;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+- E1000_NVM_RW_REG_START;
++ E1000_NVM_RW_REG_START;
+
+- wr32(E1000_EERD, eerd);
+- ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
++ E1000_WRITE_REG(hw, E1000_EERD, eerd);
++ ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+ if (ret_val)
+ break;
+
+- data[i] = (rd32(E1000_EERD) >>
+- E1000_NVM_RW_REG_DATA);
++ data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
++ E1000_NVM_RW_REG_DATA);
+ }
+
+-out:
++ if (ret_val)
++ DEBUGOUT1("NVM read error: %d\n", ret_val);
++
+ return ret_val;
+ }
+
+ /**
+- * igb_write_nvm_spi - Write to EEPROM using SPI
++ * e1000_write_nvm_spi - Write to EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+@@ -424,21 +502,23 @@
+ * Writes data to EEPROM at offset using SPI interface.
+ *
+ * If e1000_update_nvm_checksum is not called after this function , the
+- * EEPROM will most likley contain an invalid checksum.
++ * EEPROM will most likely contain an invalid checksum.
+ **/
+-s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
++s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+ {
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val = -E1000_ERR_NVM;
+ u16 widx = 0;
+
++ DEBUGFUNC("e1000_write_nvm_spi");
++
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+- hw_dbg("nvm parameter(s) out of bounds\n");
+- return ret_val;
++ DEBUGOUT("nvm parameter(s) out of bounds\n");
++ return -E1000_ERR_NVM;
+ }
+
+ while (widx < words) {
+@@ -448,19 +528,19 @@
+ if (ret_val)
+ return ret_val;
+
+- ret_val = igb_ready_nvm_eeprom(hw);
++ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val) {
+ nvm->ops.release(hw);
+ return ret_val;
+ }
+
+- igb_standby_nvm(hw);
++ e1000_standby_nvm(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode) */
+- igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
++ e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+ nvm->opcode_bits);
+
+- igb_standby_nvm(hw);
++ e1000_standby_nvm(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+@@ -469,24 +549,23 @@
+ write_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+- igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+- igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
++ e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
++ e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+ nvm->address_bits);
+
+ /* Loop to allow for up to whole page write of eeprom */
+ while (widx < words) {
+ u16 word_out = data[widx];
+-
+ word_out = (word_out >> 8) | (word_out << 8);
+- igb_shift_out_eec_bits(hw, word_out, 16);
++ e1000_shift_out_eec_bits(hw, word_out, 16);
+ widx++;
+
+ if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+- igb_standby_nvm(hw);
++ e1000_standby_nvm(hw);
+ break;
+ }
+ }
+- usleep_range(1000, 2000);
++ msec_delay(10);
+ nvm->ops.release(hw);
+ }
+
+@@ -494,132 +573,199 @@
+ }
+
+ /**
+- * igb_read_part_string - Read device part number
++ * igb_e1000_read_pba_string_generic - Read device part number
+ * @hw: pointer to the HW structure
+- * @part_num: pointer to device part number
+- * @part_num_size: size of part number buffer
++ * @pba_num: pointer to device part number
++ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+- * the value in part_num.
++ * the value in pba_num.
+ **/
+-s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
++ u32 pba_num_size)
+ {
+ s32 ret_val;
+ u16 nvm_data;
+- u16 pointer;
++ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+- if (part_num == NULL) {
+- hw_dbg("PBA string buffer was null\n");
+- ret_val = E1000_ERR_INVALID_ARGUMENT;
+- goto out;
++ DEBUGFUNC("igb_e1000_read_pba_string_generic");
++
++ if ((hw->mac.type >= e1000_i210) &&
++ !e1000_get_flash_presence_i210(hw)) {
++ DEBUGOUT("Flashless no PBA string\n");
++ return -E1000_ERR_NVM_PBA_SECTION;
++ }
++
++ if (pba_num == NULL) {
++ DEBUGOUT("PBA string buffer was null\n");
++ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+- hw_dbg("NVM Read Error\n");
+- goto out;
++ DEBUGOUT("NVM Read Error\n");
++ return ret_val;
+ }
+
+- ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
++ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+- hw_dbg("NVM Read Error\n");
+- goto out;
++ DEBUGOUT("NVM Read Error\n");
++ return ret_val;
+ }
+
+ /* if nvm_data is not ptr guard the PBA must be in legacy format which
+- * means pointer is actually our second data word for the PBA number
++ * means pba_ptr is actually our second data word for the PBA number
+ * and we can decode it into an ascii string
+ */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+- hw_dbg("NVM PBA number is not stored as string\n");
++ DEBUGOUT("NVM PBA number is not stored as string\n");
+
+- /* we will need 11 characters to store the PBA */
+- if (part_num_size < 11) {
+- hw_dbg("PBA string buffer too small\n");
++ /* make sure callers buffer is big enough to store the PBA */
++ if (pba_num_size < E1000_PBANUM_LENGTH) {
++ DEBUGOUT("PBA string buffer too small\n");
+ return E1000_ERR_NO_SPACE;
+ }
+
+- /* extract hex string from data and pointer */
+- part_num[0] = (nvm_data >> 12) & 0xF;
+- part_num[1] = (nvm_data >> 8) & 0xF;
+- part_num[2] = (nvm_data >> 4) & 0xF;
+- part_num[3] = nvm_data & 0xF;
+- part_num[4] = (pointer >> 12) & 0xF;
+- part_num[5] = (pointer >> 8) & 0xF;
+- part_num[6] = '-';
+- part_num[7] = 0;
+- part_num[8] = (pointer >> 4) & 0xF;
+- part_num[9] = pointer & 0xF;
++ /* extract hex string from data and pba_ptr */
++ pba_num[0] = (nvm_data >> 12) & 0xF;
++ pba_num[1] = (nvm_data >> 8) & 0xF;
++ pba_num[2] = (nvm_data >> 4) & 0xF;
++ pba_num[3] = nvm_data & 0xF;
++ pba_num[4] = (pba_ptr >> 12) & 0xF;
++ pba_num[5] = (pba_ptr >> 8) & 0xF;
++ pba_num[6] = '-';
++ pba_num[7] = 0;
++ pba_num[8] = (pba_ptr >> 4) & 0xF;
++ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+- part_num[10] = '\0';
++ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+- if (part_num[offset] < 0xA)
+- part_num[offset] += '0';
+- else if (part_num[offset] < 0x10)
+- part_num[offset] += 'A' - 0xA;
++ if (pba_num[offset] < 0xA)
++ pba_num[offset] += '0';
++ else if (pba_num[offset] < 0x10)
++ pba_num[offset] += 'A' - 0xA;
+ }
+
+- goto out;
++ return E1000_SUCCESS;
+ }
+
+- ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
++ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+- hw_dbg("NVM Read Error\n");
+- goto out;
++ DEBUGOUT("NVM Read Error\n");
++ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+- hw_dbg("NVM PBA number section invalid length\n");
+- ret_val = E1000_ERR_NVM_PBA_SECTION;
+- goto out;
+- }
+- /* check if part_num buffer is big enough */
+- if (part_num_size < (((u32)length * 2) - 1)) {
+- hw_dbg("PBA string buffer too small\n");
+- ret_val = E1000_ERR_NO_SPACE;
+- goto out;
++ DEBUGOUT("NVM PBA number section invalid length\n");
++ return -E1000_ERR_NVM_PBA_SECTION;
++ }
++ /* check if pba_num buffer is big enough */
++ if (pba_num_size < (((u32)length * 2) - 1)) {
++ DEBUGOUT("PBA string buffer too small\n");
++ return -E1000_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+- pointer++;
++ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+- ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
++ ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data);
+ if (ret_val) {
+- hw_dbg("NVM Read Error\n");
+- goto out;
++ DEBUGOUT("NVM Read Error\n");
++ return ret_val;
+ }
+- part_num[offset * 2] = (u8)(nvm_data >> 8);
+- part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
++ pba_num[offset * 2] = (u8)(nvm_data >> 8);
++ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ }
+- part_num[offset * 2] = '\0';
++ pba_num[offset * 2] = '\0';
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_read_mac_addr - Read device MAC address
++ * e1000_read_pba_length_generic - Read device part number length
++ * @hw: pointer to the HW structure
++ * @pba_num_size: size of part number buffer
++ *
++ * Reads the product board assembly (PBA) number length from the EEPROM and
++ * stores the value in pba_num_size.
++ **/
++s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size)
++{
++ s32 ret_val;
++ u16 nvm_data;
++ u16 pba_ptr;
++ u16 length;
++
++ DEBUGFUNC("e1000_read_pba_length_generic");
++
++ if (pba_num_size == NULL) {
++ DEBUGOUT("PBA buffer size was null\n");
++ return -E1000_ERR_INVALID_ARGUMENT;
++ }
++
++ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ return ret_val;
++ }
++
++ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ return ret_val;
++ }
++
++ /* if data is not ptr guard the PBA must be in legacy format */
++ if (nvm_data != NVM_PBA_PTR_GUARD) {
++ *pba_num_size = E1000_PBANUM_LENGTH;
++ return E1000_SUCCESS;
++ }
++
++ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
++ if (ret_val) {
++ DEBUGOUT("NVM Read Error\n");
++ return ret_val;
++ }
++
++ if (length == 0xFFFF || length == 0) {
++ DEBUGOUT("NVM PBA number section invalid length\n");
++ return -E1000_ERR_NVM_PBA_SECTION;
++ }
++
++ /* Convert from length in u16 values to u8 chars, add 1 for NULL,
++ * and subtract 2 because length field is included in length.
++ */
++ *pba_num_size = ((u32)length * 2) - 1;
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * igb_e1000_read_mac_addr_generic - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the EEPROM and stores the value.
+ * Since devices with two ports use the same EEPROM, we increment the
+ * last bit in the MAC address for the second port.
+ **/
+-s32 igb_read_mac_addr(struct e1000_hw *hw)
++
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_read_mac_addr_generic(struct e1000_hw *hw)
+ {
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+- rar_high = rd32(E1000_RAH(0));
+- rar_low = rd32(E1000_RAL(0));
++ rar_high = E1000_READ_REG(hw, E1000_RAH(0));
++ rar_low = E1000_READ_REG(hw, E1000_RAL(0));
+
+ for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
+@@ -627,83 +773,104 @@
+ for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
+
+- for (i = 0; i < ETH_ALEN; i++)
++ for (i = 0; i < ETH_ADDR_LEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+- return 0;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_validate_nvm_checksum - Validate EEPROM checksum
++ * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+-s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
++s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
+ {
+- s32 ret_val = 0;
++ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
++ DEBUGFUNC("e1000_validate_nvm_checksum_generic");
++
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+- hw_dbg("NVM Read Error\n");
+- goto out;
++ DEBUGOUT("NVM Read Error\n");
++ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16) NVM_SUM) {
+- hw_dbg("NVM Checksum Invalid\n");
+- ret_val = -E1000_ERR_NVM;
+- goto out;
++ DEBUGOUT("NVM Checksum Invalid\n");
++ return -E1000_ERR_NVM;
+ }
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_update_nvm_checksum - Update EEPROM checksum
++ * e1000_update_nvm_checksum_generic - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+-s32 igb_update_nvm_checksum(struct e1000_hw *hw)
++s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
+ {
+- s32 ret_val;
++ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
++ DEBUGFUNC("e1000_update_nvm_checksum");
++
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+- hw_dbg("NVM Read Error while updating checksum.\n");
+- goto out;
++ DEBUGOUT("NVM Read Error while updating checksum.\n");
++ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
+ if (ret_val)
+- hw_dbg("NVM Write Error while updating checksum.\n");
++ DEBUGOUT("NVM Write Error while updating checksum.\n");
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_get_fw_version - Get firmware version information
++ * e1000_reload_nvm_generic - Reloads EEPROM
+ * @hw: pointer to the HW structure
+- * @fw_vers: pointer to output structure
+ *
+- * unsupported MAC types will return all 0 version structure
++ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
++ * extended control register.
+ **/
+-void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
++static void e1000_reload_nvm_generic(struct e1000_hw *hw)
++{
++ u32 ctrl_ext;
++
++ DEBUGFUNC("e1000_reload_nvm_generic");
++
++ usec_delay(10);
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
++ E1000_WRITE_FLUSH(hw);
++}
++
++/**
++ * e1000_get_fw_version - Get firmware version information
++ * @hw: pointer to the HW structure
++ * @fw_vers: pointer to output version structure
++ *
++ * unsupported/not present features return 0 in version structure
++ **/
++void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+ {
+ u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
+ u8 q, hval, rem, result;
+@@ -711,17 +878,18 @@
+
+ memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+
+- /* basic eeprom version numbers and bits used vary by part and by tool
+- * used to create the nvm images. Check which data format we have.
+- */
+- hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
++ /* basic eeprom version numbers, bits used vary by part and by tool
++ * used to create the nvm images */
++ /* Check which data format we have */
+ switch (hw->mac.type) {
+ case e1000_i211:
+- igb_read_invm_version(hw, fw_vers);
++ e1000_read_invm_version(hw, fw_vers);
+ return;
+ case e1000_82575:
+ case e1000_82576:
+ case e1000_82580:
++ case e1000_i354:
++ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ /* Use this format, unless EETRACK ID exists,
+ * then use alternate format
+ */
+@@ -736,12 +904,13 @@
+ }
+ break;
+ case e1000_i210:
+- if (!(igb_get_flash_presence_i210(hw))) {
+- igb_read_invm_version(hw, fw_vers);
++ if (!(e1000_get_flash_presence_i210(hw))) {
++ e1000_read_invm_version(hw, fw_vers);
+ return;
+ }
+ /* fall through */
+ case e1000_i350:
++ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ /* find combo image version */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if ((comb_offset != 0x0) &&
+@@ -769,6 +938,7 @@
+ }
+ break;
+ default:
++ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ return;
+ }
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+@@ -797,5 +967,11 @@
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
+ | eeprom_verl;
++ } else if ((etrack_test & NVM_ETRACK_VALID) == 0) {
++ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
++ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
++ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) |
++ eeprom_verl;
+ }
+ }
++
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
+--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h 2016-11-14 14:32:08.579567168 +0000
+@@ -1,41 +1,30 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
+
+ #ifndef _E1000_NVM_H_
+ #define _E1000_NVM_H_
+
+-s32 igb_acquire_nvm(struct e1000_hw *hw);
+-void igb_release_nvm(struct e1000_hw *hw);
+-s32 igb_read_mac_addr(struct e1000_hw *hw);
+-s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
+-s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
+- u32 part_num_size);
+-s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+-s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+-s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+-s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
+-s32 igb_update_nvm_checksum(struct e1000_hw *hw);
+-
+ struct e1000_fw_version {
+ u32 etrack_id;
+ u16 eep_major;
+@@ -51,6 +40,31 @@
+ u16 or_build;
+ u16 or_patch;
+ };
+-void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
++
++void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
++s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
++void e1000_null_nvm_generic(struct e1000_hw *hw);
++s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data);
++s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
++s32 e1000_acquire_nvm_generic(struct e1000_hw *hw);
++
++s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
++s32 igb_e1000_read_mac_addr_generic(struct e1000_hw *hw);
++s32 igb_e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
++ u32 pba_num_size);
++s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
++s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
++s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
++ u16 *data);
++s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
++s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
++s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
++ u16 *data);
++s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
++void e1000_release_nvm_generic(struct e1000_hw *hw);
++void e1000_get_fw_version(struct e1000_hw *hw,
++ struct e1000_fw_version *fw_vers);
++
++#define E1000_STM_OPCODE 0xDB00
+
+ #endif
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_osdep.h b/drivers/net/ethernet/intel/igb/e1000_osdep.h
+--- a/drivers/net/ethernet/intel/igb/e1000_osdep.h 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_osdep.h 2016-11-14 14:32:08.579567168 +0000
+@@ -0,0 +1,141 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++/* glue for the OS independent part of e1000
++ * includes register access macros
++ */
++
++#ifndef _E1000_OSDEP_H_
++#define _E1000_OSDEP_H_
++
++#include
++#include
++#include
++#include
++#include
++#include "kcompat.h"
++
++#define usec_delay(x) udelay(x)
++#define usec_delay_irq(x) udelay(x)
++#ifndef msec_delay
++#define msec_delay(x) do { \
++ /* Don't mdelay in interrupt context! */ \
++ if (in_interrupt()) \
++ BUG(); \
++ else \
++ msleep(x); \
++} while (0)
++
++/* Some workarounds require millisecond delays and are run during interrupt
++ * context. Most notably, when establishing link, the phy may need tweaking
++ * but cannot process phy register reads/writes faster than millisecond
++ * intervals...and we establish link due to a "link status change" interrupt.
++ */
++#define msec_delay_irq(x) mdelay(x)
++
++#define E1000_READ_REG(x, y) e1000_read_reg(x, y)
++#endif
++
++#define PCI_COMMAND_REGISTER PCI_COMMAND
++#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
++#define ETH_ADDR_LEN ETH_ALEN
++
++#ifdef __BIG_ENDIAN
++#define E1000_BIG_ENDIAN __BIG_ENDIAN
++#endif
++
++#ifdef DEBUG
++#define DEBUGOUT(S) pr_debug(S)
++#define DEBUGOUT1(S, A...) pr_debug(S, ## A)
++#else
++#define DEBUGOUT(S)
++#define DEBUGOUT1(S, A...)
++#endif
++
++#ifdef DEBUG_FUNC
++#define DEBUGFUNC(F) DEBUGOUT(F "\n")
++#else
++#define DEBUGFUNC(F)
++#endif
++#define DEBUGOUT2 DEBUGOUT1
++#define DEBUGOUT3 DEBUGOUT2
++#define DEBUGOUT7 DEBUGOUT3
++
++#define E1000_REGISTER(a, reg) reg
++
++/* forward declaration */
++struct e1000_hw;
++
++/* write operations, indexed using DWORDS */
++#define E1000_WRITE_REG(hw, reg, val) \
++do { \
++ u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
++ if (!E1000_REMOVED(hw_addr)) \
++ writel((val), &hw_addr[(reg)]); \
++} while (0)
++
++u32 e1000_read_reg(struct e1000_hw *hw, u32 reg);
++
++#define E1000_WRITE_REG_ARRAY(hw, reg, idx, val) \
++ E1000_WRITE_REG((hw), (reg) + ((idx) << 2), (val))
++
++#define E1000_READ_REG_ARRAY(hw, reg, idx) ( \
++ e1000_read_reg((hw), (reg) + ((idx) << 2)))
++
++#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
++#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
++
++#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
++ writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + \
++ ((offset) << 1))))
++
++#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
++ readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1)))
++
++#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
++ writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))))
++
++#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
++ readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))
++
++#define E1000_WRITE_REG_IO(a, reg, offset) do { \
++ outl(reg, ((a)->io_base)); \
++ outl(offset, ((a)->io_base + 4)); \
++ } while (0)
++
++#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
++
++#define E1000_WRITE_FLASH_REG(a, reg, value) ( \
++ writel((value), ((a)->flash_address + reg)))
++
++#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \
++ writew((value), ((a)->flash_address + reg)))
++
++#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg))
++
++#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg))
++
++#define E1000_REMOVED(h) unlikely(!(h))
++
++#endif /* _E1000_OSDEP_H_ */
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
+--- a/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c 2016-11-14 14:32:08.579567168 +0000
+@@ -1,147 +1,271 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
+-
+-#include
+-#include
+-
+-#include "e1000_mac.h"
+-#include "e1000_phy.h"
+-
+-static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
+-static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+- u16 *phy_ctrl);
+-static s32 igb_wait_autoneg(struct e1000_hw *hw);
+-static s32 igb_set_master_slave_mode(struct e1000_hw *hw);
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
++*******************************************************************************/
++
++#include "e1000_api.h"
++
++static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+ /* Cable length tables */
+ static const u16 e1000_m88_cable_length_table[] = {
+ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+ #define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+- (sizeof(e1000_m88_cable_length_table) / \
+- sizeof(e1000_m88_cable_length_table[0]))
++ (sizeof(e1000_m88_cable_length_table) / \
++ sizeof(e1000_m88_cable_length_table[0]))
+
+ static const u16 e1000_igp_2_cable_length_table[] = {
+- 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+- 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+- 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+- 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+- 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+- 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+- 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+- 104, 109, 114, 118, 121, 124};
++ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
++ 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
++ 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
++ 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
++ 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
++ 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
++ 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
++ 124};
+ #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+- (sizeof(e1000_igp_2_cable_length_table) / \
+- sizeof(e1000_igp_2_cable_length_table[0]))
++ (sizeof(e1000_igp_2_cable_length_table) / \
++ sizeof(e1000_igp_2_cable_length_table[0]))
++
++/**
++ * e1000_init_phy_ops_generic - Initialize PHY function pointers
++ * @hw: pointer to the HW structure
++ *
++ * Setups up the function pointers to no-op functions
++ **/
++void e1000_init_phy_ops_generic(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ DEBUGFUNC("e1000_init_phy_ops_generic");
++
++ /* Initialize function pointers */
++ phy->ops.init_params = e1000_null_ops_generic;
++ phy->ops.acquire = e1000_null_ops_generic;
++ phy->ops.check_polarity = e1000_null_ops_generic;
++ phy->ops.check_reset_block = e1000_null_ops_generic;
++ phy->ops.commit = e1000_null_ops_generic;
++ phy->ops.force_speed_duplex = e1000_null_ops_generic;
++ phy->ops.get_cfg_done = e1000_null_ops_generic;
++ phy->ops.get_cable_length = e1000_null_ops_generic;
++ phy->ops.get_info = e1000_null_ops_generic;
++ phy->ops.set_page = e1000_null_set_page;
++ phy->ops.read_reg = e1000_null_read_reg;
++ phy->ops.read_reg_locked = e1000_null_read_reg;
++ phy->ops.read_reg_page = e1000_null_read_reg;
++ phy->ops.release = e1000_null_phy_generic;
++ phy->ops.reset = e1000_null_ops_generic;
++ phy->ops.set_d0_lplu_state = e1000_null_lplu_state;
++ phy->ops.set_d3_lplu_state = e1000_null_lplu_state;
++ phy->ops.write_reg = e1000_null_write_reg;
++ phy->ops.write_reg_locked = e1000_null_write_reg;
++ phy->ops.write_reg_page = e1000_null_write_reg;
++ phy->ops.power_up = e1000_null_phy_generic;
++ phy->ops.power_down = e1000_null_phy_generic;
++ phy->ops.read_i2c_byte = e1000_read_i2c_byte_null;
++ phy->ops.write_i2c_byte = e1000_write_i2c_byte_null;
++}
++
++/**
++ * e1000_null_set_page - No-op function, return 0
++ * @hw: pointer to the HW structure
++ **/
++s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw,
++ u16 E1000_UNUSEDARG data)
++{
++ DEBUGFUNC("e1000_null_set_page");
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_null_read_reg - No-op function, return 0
++ * @hw: pointer to the HW structure
++ **/
++s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw,
++ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data)
++{
++ DEBUGFUNC("e1000_null_read_reg");
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_null_phy_generic - No-op function, return void
++ * @hw: pointer to the HW structure
++ **/
++void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw)
++{
++ DEBUGFUNC("e1000_null_phy_generic");
++ return;
++}
++
++/**
++ * e1000_null_lplu_state - No-op function, return 0
++ * @hw: pointer to the HW structure
++ **/
++s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw,
++ bool E1000_UNUSEDARG active)
++{
++ DEBUGFUNC("e1000_null_lplu_state");
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_null_write_reg - No-op function, return 0
++ * @hw: pointer to the HW structure
++ **/
++s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw,
++ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data)
++{
++ DEBUGFUNC("e1000_null_write_reg");
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_read_i2c_byte_null - No-op function, return 0
++ * @hw: pointer to hardware structure
++ * @byte_offset: byte offset to write
++ * @dev_addr: device address
++ * @data: data value read
++ *
++ **/
++s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw,
++ u8 E1000_UNUSEDARG byte_offset,
++ u8 E1000_UNUSEDARG dev_addr,
++ u8 E1000_UNUSEDARG *data)
++{
++ DEBUGFUNC("e1000_read_i2c_byte_null");
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_write_i2c_byte_null - No-op function, return 0
++ * @hw: pointer to hardware structure
++ * @byte_offset: byte offset to write
++ * @dev_addr: device address
++ * @data: data value to write
++ *
++ **/
++s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw,
++ u8 E1000_UNUSEDARG byte_offset,
++ u8 E1000_UNUSEDARG dev_addr,
++ u8 E1000_UNUSEDARG data)
++{
++ DEBUGFUNC("e1000_write_i2c_byte_null");
++ return E1000_SUCCESS;
++}
+
+ /**
+- * igb_check_reset_block - Check if PHY reset is blocked
++ * e1000_check_reset_block_generic - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Read the PHY management control register and check whether a PHY reset
+- * is blocked. If a reset is not blocked return 0, otherwise
++ * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise
+ * return E1000_BLK_PHY_RESET (12).
+ **/
+-s32 igb_check_reset_block(struct e1000_hw *hw)
++s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
+ {
+ u32 manc;
+
+- manc = rd32(E1000_MANC);
++ DEBUGFUNC("e1000_check_reset_block");
++
++ manc = E1000_READ_REG(hw, E1000_MANC);
+
+- return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
++ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
++ E1000_BLK_PHY_RESET : E1000_SUCCESS;
+ }
+
+ /**
+- * igb_get_phy_id - Retrieve the PHY ID and revision
++ * e1000_get_phy_id - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/
+-s32 igb_get_phy_id(struct e1000_hw *hw)
++s32 e1000_get_phy_id(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+- s32 ret_val = 0;
++ s32 ret_val = E1000_SUCCESS;
+ u16 phy_id;
+
++ DEBUGFUNC("e1000_get_phy_id");
++
++ if (!phy->ops.read_reg)
++ return E1000_SUCCESS;
++
+ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ phy->id = (u32)(phy_id << 16);
+- udelay(20);
++ usec_delay(20);
+ ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_phy_reset_dsp - Reset PHY DSP
++ * e1000_phy_reset_dsp_generic - Reset PHY DSP
+ * @hw: pointer to the HW structure
+ *
+ * Reset the digital signal processor.
+ **/
+-static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
++s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
+ {
+- s32 ret_val = 0;
++ s32 ret_val;
+
+- if (!(hw->phy.ops.write_reg))
+- goto out;
++ DEBUGFUNC("e1000_phy_reset_dsp_generic");
++
++ if (!hw->phy.ops.write_reg)
++ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+ if (ret_val)
+- goto out;
+-
+- ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
++ return ret_val;
+
+-out:
+- return ret_val;
++ return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+ }
+
+ /**
+- * igb_read_phy_reg_mdic - Read MDI control register
++ * e1000_read_phy_reg_mdic - Read MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+- * Reads the MDI control regsiter in the PHY at offset and stores the
++ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+-s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
++s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+- s32 ret_val = 0;
++
++ DEBUGFUNC("e1000_read_phy_reg_mdic");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+- hw_dbg("PHY Address %d is out of range\n", offset);
+- ret_val = -E1000_ERR_PARAM;
+- goto out;
++ DEBUGOUT1("PHY Address %d is out of range\n", offset);
++ return -E1000_ERR_PARAM;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+@@ -152,52 +276,55 @@
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+- wr32(E1000_MDIC, mdic);
++ E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+- udelay(50);
+- mdic = rd32(E1000_MDIC);
++ usec_delay_irq(50);
++ mdic = E1000_READ_REG(hw, E1000_MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+- hw_dbg("MDI Read did not complete\n");
+- ret_val = -E1000_ERR_PHY;
+- goto out;
++ DEBUGOUT("MDI Read did not complete\n");
++ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+- hw_dbg("MDI Error\n");
+- ret_val = -E1000_ERR_PHY;
+- goto out;
++ DEBUGOUT("MDI Error\n");
++ return -E1000_ERR_PHY;
++ }
++ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
++ DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n",
++ offset,
++ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
++ return -E1000_ERR_PHY;
+ }
+ *data = (u16) mdic;
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_write_phy_reg_mdic - Write MDI control register
++ * e1000_write_phy_reg_mdic - Write MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+-s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
++s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+- s32 ret_val = 0;
++
++ DEBUGFUNC("e1000_write_phy_reg_mdic");
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+- hw_dbg("PHY Address %d is out of range\n", offset);
+- ret_val = -E1000_ERR_PARAM;
+- goto out;
++ DEBUGOUT1("PHY Address %d is out of range\n", offset);
++ return -E1000_ERR_PARAM;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+@@ -209,35 +336,38 @@
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+- wr32(E1000_MDIC, mdic);
++ E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+- udelay(50);
+- mdic = rd32(E1000_MDIC);
++ usec_delay_irq(50);
++ mdic = E1000_READ_REG(hw, E1000_MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+- hw_dbg("MDI Write did not complete\n");
+- ret_val = -E1000_ERR_PHY;
+- goto out;
++ DEBUGOUT("MDI Write did not complete\n");
++ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+- hw_dbg("MDI Error\n");
+- ret_val = -E1000_ERR_PHY;
+- goto out;
++ DEBUGOUT("MDI Error\n");
++ return -E1000_ERR_PHY;
++ }
++ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
++ DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n",
++ offset,
++ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
++ return -E1000_ERR_PHY;
+ }
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_read_phy_reg_i2c - Read PHY register using i2c
++ * e1000_read_phy_reg_i2c - Read PHY register using i2c
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+@@ -245,11 +375,13 @@
+ * Reads the PHY register at offset using the i2c interface and stores the
+ * retrieved information in data.
+ **/
+-s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
++s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, i2ccmd = 0;
+
++ DEBUGFUNC("e1000_read_phy_reg_i2c");
++
+ /* Set up Op-code, Phy Address, and register address in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+@@ -258,47 +390,49 @@
+ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+ (E1000_I2CCMD_OPCODE_READ));
+
+- wr32(E1000_I2CCMD, i2ccmd);
++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+- udelay(50);
+- i2ccmd = rd32(E1000_I2CCMD);
++ usec_delay(50);
++ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+- hw_dbg("I2CCMD Read did not complete\n");
++ DEBUGOUT("I2CCMD Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+- hw_dbg("I2CCMD Error bit set\n");
++ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+
+ /* Need to byte-swap the 16-bit value. */
+ *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+
+- return 0;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_write_phy_reg_i2c - Write PHY register using i2c
++ * e1000_write_phy_reg_i2c - Write PHY register using i2c
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset using the i2c interface.
+ **/
+-s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
++s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, i2ccmd = 0;
+ u16 phy_data_swapped;
+
++ DEBUGFUNC("e1000_write_phy_reg_i2c");
++
+ /* Prevent overwritting SFP I2C EEPROM which is at A0 address.*/
+ if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) {
+- hw_dbg("PHY I2C Address %d is out of range.\n",
++ DEBUGOUT1("PHY I2C Address %d is out of range.\n",
+ hw->phy.addr);
+ return -E1000_ERR_CONFIG;
+ }
+@@ -315,29 +449,29 @@
+ E1000_I2CCMD_OPCODE_WRITE |
+ phy_data_swapped);
+
+- wr32(E1000_I2CCMD, i2ccmd);
++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+- udelay(50);
+- i2ccmd = rd32(E1000_I2CCMD);
++ usec_delay(50);
++ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+- hw_dbg("I2CCMD Write did not complete\n");
++ DEBUGOUT("I2CCMD Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+- hw_dbg("I2CCMD Error bit set\n");
++ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+
+- return 0;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_read_sfp_data_byte - Reads SFP module data.
++ * e1000_read_sfp_data_byte - Reads SFP module data.
+ * @hw: pointer to the HW structure
+ * @offset: byte location offset to be read
+ * @data: read data buffer pointer
+@@ -349,14 +483,16 @@
+ * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters
+ * access
+ **/
+-s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
++s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
+ {
+ u32 i = 0;
+ u32 i2ccmd = 0;
+ u32 data_local = 0;
+
++ DEBUGFUNC("e1000_read_sfp_data_byte");
++
+ if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
+- hw_dbg("I2CCMD command address exceeds upper limit\n");
++ DEBUGOUT("I2CCMD command address exceeds upper limit\n");
+ return -E1000_ERR_PHY;
+ }
+
+@@ -367,30 +503,103 @@
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_READ);
+
+- wr32(E1000_I2CCMD, i2ccmd);
++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+- udelay(50);
+- data_local = rd32(E1000_I2CCMD);
++ usec_delay(50);
++ data_local = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (data_local & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(data_local & E1000_I2CCMD_READY)) {
+- hw_dbg("I2CCMD Read did not complete\n");
++ DEBUGOUT("I2CCMD Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (data_local & E1000_I2CCMD_ERROR) {
+- hw_dbg("I2CCMD Error bit set\n");
++ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+ *data = (u8) data_local & 0xFF;
+
+- return 0;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_read_phy_reg_igp - Read igp PHY register
++ * e1000_write_sfp_data_byte - Writes SFP module data.
++ * @hw: pointer to the HW structure
++ * @offset: byte location offset to write to
++ * @data: data to write
++ *
++ * Writes one byte to SFP module data stored
++ * in SFP resided EEPROM memory or SFP diagnostic area.
++ * Function should be called with
++ * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access
++ * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters
++ * access
++ **/
++s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
++{
++ u32 i = 0;
++ u32 i2ccmd = 0;
++ u32 data_local = 0;
++
++ DEBUGFUNC("e1000_write_sfp_data_byte");
++
++ if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
++ DEBUGOUT("I2CCMD command address exceeds upper limit\n");
++ return -E1000_ERR_PHY;
++ }
++ /* The programming interface is 16 bits wide
++ * so we need to read the whole word first
++ * then update appropriate byte lane and write
++ * the updated word back.
++ */
++ /* Set up Op-code, EEPROM Address,in the I2CCMD
++ * register. The MAC will take care of interfacing
++ * with an EEPROM to write the data given.
++ */
++ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
++ E1000_I2CCMD_OPCODE_READ);
++ /* Set a command to read single word */
++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
++ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
++ usec_delay(50);
++ /* Poll the ready bit to see if lastly
++ * launched I2C operation completed
++ */
++ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
++ if (i2ccmd & E1000_I2CCMD_READY) {
++ /* Check if this is READ or WRITE phase */
++ if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
++ E1000_I2CCMD_OPCODE_READ) {
++ /* Write the selected byte
++ * lane and update whole word
++ */
++ data_local = i2ccmd & 0xFF00;
++ data_local |= data;
++ i2ccmd = ((offset <<
++ E1000_I2CCMD_REG_ADDR_SHIFT) |
++ E1000_I2CCMD_OPCODE_WRITE | data_local);
++ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
++ } else {
++ break;
++ }
++ }
++ }
++ if (!(i2ccmd & E1000_I2CCMD_READY)) {
++ DEBUGOUT("I2CCMD Write did not complete\n");
++ return -E1000_ERR_PHY;
++ }
++ if (i2ccmd & E1000_I2CCMD_ERROR) {
++ DEBUGOUT("I2CCMD Error bit set\n");
++ return -E1000_ERR_PHY;
++ }
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_read_phy_reg_m88 - Read m88 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+@@ -399,38 +608,29 @@
+ * and storing the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+-s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
++s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
+ {
+- s32 ret_val = 0;
++ s32 ret_val;
+
+- if (!(hw->phy.ops.acquire))
+- goto out;
++ DEBUGFUNC("e1000_read_phy_reg_m88");
++
++ if (!hw->phy.ops.acquire)
++ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+- goto out;
+-
+- if (offset > MAX_PHY_MULTI_PAGE_REG) {
+- ret_val = igb_write_phy_reg_mdic(hw,
+- IGP01E1000_PHY_PAGE_SELECT,
+- (u16)offset);
+- if (ret_val) {
+- hw->phy.ops.release(hw);
+- goto out;
+- }
+- }
++ return ret_val;
+
+- ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+- data);
++ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
++ data);
+
+ hw->phy.ops.release(hw);
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_write_phy_reg_igp - Write igp PHY register
++ * e1000_write_phy_reg_m88 - Write m88 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+@@ -438,80 +638,415 @@
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+-s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
++s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
+ {
+- s32 ret_val = 0;
++ s32 ret_val;
++
++ DEBUGFUNC("e1000_write_phy_reg_m88");
+
+- if (!(hw->phy.ops.acquire))
+- goto out;
++ if (!hw->phy.ops.acquire)
++ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- if (offset > MAX_PHY_MULTI_PAGE_REG) {
+- ret_val = igb_write_phy_reg_mdic(hw,
+- IGP01E1000_PHY_PAGE_SELECT,
+- (u16)offset);
+- if (ret_val) {
+- hw->phy.ops.release(hw);
+- goto out;
+- }
++ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
++ data);
++
++ hw->phy.ops.release(hw);
++
++ return ret_val;
++}
++
++/**
++ * igb_e1000_set_page_igp - Set page as on IGP-like PHY(s)
++ * @hw: pointer to the HW structure
++ * @page: page to set (shifted left when necessary)
++ *
++ * Sets PHY page required for PHY register access. Assumes semaphore is
++ * already acquired. Note, this function sets phy.addr to 1 so the caller
++ * must set it appropriately (if necessary) after this function returns.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_set_page_igp(struct e1000_hw *hw, u16 page)
++{
++ DEBUGFUNC("igb_e1000_set_page_igp");
++
++ DEBUGOUT1("Setting page 0x%x\n", page);
++
++ hw->phy.addr = 1;
++
++ return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
++}
++
++/**
++ * __e1000_read_phy_reg_igp - Read igp PHY register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ * @locked: semaphore has already been acquired or not
++ *
++ * Acquires semaphore, if necessary, then reads the PHY register at offset
++ * and stores the retrieved information in data. Release any acquired
++ * semaphores before exiting.
++ **/
++static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
++ bool locked)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("__e1000_read_phy_reg_igp");
++
++ if (!locked) {
++ if (!hw->phy.ops.acquire)
++ return E1000_SUCCESS;
++
++ ret_val = hw->phy.ops.acquire(hw);
++ if (ret_val)
++ return ret_val;
+ }
+
+- ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+- data);
++ if (offset > MAX_PHY_MULTI_PAGE_REG)
++ ret_val = e1000_write_phy_reg_mdic(hw,
++ IGP01E1000_PHY_PAGE_SELECT,
++ (u16)offset);
++ if (!ret_val)
++ ret_val = e1000_read_phy_reg_mdic(hw,
++ MAX_PHY_REG_ADDRESS & offset,
++ data);
++ if (!locked)
++ hw->phy.ops.release(hw);
+
+- hw->phy.ops.release(hw);
++ return ret_val;
++}
++
++/**
++ * e1000_read_phy_reg_igp - Read igp PHY register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ *
++ * Acquires semaphore then reads the PHY register at offset and stores the
++ * retrieved information in data.
++ * Release the acquired semaphore before exiting.
++ **/
++s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ return __e1000_read_phy_reg_igp(hw, offset, data, false);
++}
++
++/**
++ * e1000_read_phy_reg_igp_locked - Read igp PHY register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ *
++ * Reads the PHY register at offset and stores the retrieved information
++ * in data. Assumes semaphore already acquired.
++ **/
++s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ return __e1000_read_phy_reg_igp(hw, offset, data, true);
++}
++
++/**
++ * e1000_write_phy_reg_igp - Write igp PHY register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ * @locked: semaphore has already been acquired or not
++ *
++ * Acquires semaphore, if necessary, then writes the data to PHY register
++ * at the offset. Release any acquired semaphores before exiting.
++ **/
++static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
++ bool locked)
++{
++ s32 ret_val = E1000_SUCCESS;
++
++ DEBUGFUNC("e1000_write_phy_reg_igp");
++
++ if (!locked) {
++ if (!hw->phy.ops.acquire)
++ return E1000_SUCCESS;
++
++ ret_val = hw->phy.ops.acquire(hw);
++ if (ret_val)
++ return ret_val;
++ }
++
++ if (offset > MAX_PHY_MULTI_PAGE_REG)
++ ret_val = e1000_write_phy_reg_mdic(hw,
++ IGP01E1000_PHY_PAGE_SELECT,
++ (u16)offset);
++ if (!ret_val)
++ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
++ offset,
++ data);
++ if (!locked)
++ hw->phy.ops.release(hw);
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link
++ * e1000_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
+ *
+- * Sets up Carrier-sense on Transmit and downshift values.
++ * Acquires semaphore then writes the data to PHY register
++ * at the offset. Release any acquired semaphores before exiting.
+ **/
+-s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
++s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ return __e1000_write_phy_reg_igp(hw, offset, data, false);
++}
++
++/**
++ * e1000_write_phy_reg_igp_locked - Write igp PHY register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ *
++ * Writes the data to PHY register at the offset.
++ * Assumes semaphore already acquired.
++ **/
++s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ return __e1000_write_phy_reg_igp(hw, offset, data, true);
++}
++
++/**
++ * __e1000_read_kmrn_reg - Read kumeran register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ * @locked: semaphore has already been acquired or not
++ *
++ * Acquires semaphore, if necessary. Then reads the PHY register at offset
++ * using the kumeran interface. The information retrieved is stored in data.
++ * Release any acquired semaphores before exiting.
++ **/
++static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
++ bool locked)
++{
++ u32 kmrnctrlsta;
++
++ DEBUGFUNC("__e1000_read_kmrn_reg");
++
++ if (!locked) {
++ s32 ret_val = E1000_SUCCESS;
++
++ if (!hw->phy.ops.acquire)
++ return E1000_SUCCESS;
++
++ ret_val = hw->phy.ops.acquire(hw);
++ if (ret_val)
++ return ret_val;
++ }
++
++ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
++ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
++ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
++ E1000_WRITE_FLUSH(hw);
++
++ usec_delay(2);
++
++ kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
++ *data = (u16)kmrnctrlsta;
++
++ if (!locked)
++ hw->phy.ops.release(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_read_kmrn_reg_generic - Read kumeran register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ *
++ * Acquires semaphore then reads the PHY register at offset using the
++ * kumeran interface. The information retrieved is stored in data.
++ * Release the acquired semaphore before exiting.
++ **/
++s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ return __e1000_read_kmrn_reg(hw, offset, data, false);
++}
++
++/**
++ * e1000_read_kmrn_reg_locked - Read kumeran register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to be read
++ * @data: pointer to the read data
++ *
++ * Reads the PHY register at offset using the kumeran interface. The
++ * information retrieved is stored in data.
++ * Assumes semaphore already acquired.
++ **/
++s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
++{
++ return __e1000_read_kmrn_reg(hw, offset, data, true);
++}
++
++/**
++ * __e1000_write_kmrn_reg - Write kumeran register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ * @locked: semaphore has already been acquired or not
++ *
++ * Acquires semaphore, if necessary. Then write the data to PHY register
++ * at the offset using the kumeran interface. Release any acquired semaphores
++ * before exiting.
++ **/
++static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
++ bool locked)
++{
++ u32 kmrnctrlsta;
++
++ DEBUGFUNC("e1000_write_kmrn_reg_generic");
++
++ if (!locked) {
++ s32 ret_val = E1000_SUCCESS;
++
++ if (!hw->phy.ops.acquire)
++ return E1000_SUCCESS;
++
++ ret_val = hw->phy.ops.acquire(hw);
++ if (ret_val)
++ return ret_val;
++ }
++
++ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
++ E1000_KMRNCTRLSTA_OFFSET) | data;
++ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
++ E1000_WRITE_FLUSH(hw);
++
++ usec_delay(2);
++
++ if (!locked)
++ hw->phy.ops.release(hw);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_write_kmrn_reg_generic - Write kumeran register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ *
++ * Acquires semaphore then writes the data to the PHY register at the offset
++ * using the kumeran interface. Release the acquired semaphore before exiting.
++ **/
++s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ return __e1000_write_kmrn_reg(hw, offset, data, false);
++}
++
++/**
++ * e1000_write_kmrn_reg_locked - Write kumeran register
++ * @hw: pointer to the HW structure
++ * @offset: register offset to write to
++ * @data: data to write at register offset
++ *
++ * Write the data to PHY register at the offset using the kumeran interface.
++ * Assumes semaphore already acquired.
++ **/
++s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
++{
++ return __e1000_write_kmrn_reg(hw, offset, data, true);
++}
++
++/**
++ * e1000_set_master_slave_mode - Setup PHY for Master/slave mode
++ * @hw: pointer to the HW structure
++ *
++ * Sets up Master/slave mode
++ **/
++static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
+ {
+- struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+- if (phy->reset_disable) {
+- ret_val = 0;
+- goto out;
++ /* Resolve Master/Slave mode */
++ ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data);
++ if (ret_val)
++ return ret_val;
++
++ /* load defaults for future use */
++ hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
++ ((phy_data & CR_1000T_MS_VALUE) ?
++ e1000_ms_force_master :
++ e1000_ms_force_slave) : e1000_ms_auto;
++
++ switch (hw->phy.ms_type) {
++ case e1000_ms_force_master:
++ phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
++ break;
++ case e1000_ms_force_slave:
++ phy_data |= CR_1000T_MS_ENABLE;
++ phy_data &= ~(CR_1000T_MS_VALUE);
++ break;
++ case e1000_ms_auto:
++ phy_data &= ~CR_1000T_MS_ENABLE;
++ /* fall-through */
++ default:
++ break;
+ }
+
+- if (phy->type == e1000_phy_82580) {
++ return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data);
++}
++
++/**
++ * igb_e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
++ * @hw: pointer to the HW structure
++ *
++ * Sets up Carrier-sense on Transmit and downshift values.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_copper_link_setup_82577(struct e1000_hw *hw)
++{
++ s32 ret_val;
++ u16 phy_data;
++
++ DEBUGFUNC("igb_e1000_copper_link_setup_82577");
++
++ if (hw->phy.reset_disable)
++ return E1000_SUCCESS;
++
++ if (hw->phy.type == e1000_phy_82580) {
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+- hw_dbg("Error resetting the PHY.\n");
+- goto out;
++ DEBUGOUT("Error resetting the PHY.\n");
++ return ret_val;
+ }
+ }
+
+- /* Enable CRS on TX. This must be set for half-duplex operation. */
+- ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data);
++ /* Enable CRS on Tx. This must be set for half-duplex operation. */
++ ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- phy_data |= I82580_CFG_ASSERT_CRS_ON_TX;
++ phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
+
+ /* Enable downshift */
+- phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
++ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
+
+- ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
++ ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ /* Set MDI/MDIX mode */
+- ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
++ ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+- goto out;
+- phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
++ return ret_val;
++ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+ /* Options:
+ * 0 - Auto (default)
+ * 1 - MDI mode
+@@ -521,41 +1056,42 @@
+ case 1:
+ break;
+ case 2:
+- phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX;
++ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+ break;
+ case 0:
+ default:
+- phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX;
++ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+ break;
+ }
+- ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
++ ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
++ if (ret_val)
++ return ret_val;
+
+-out:
+- return ret_val;
++ return e1000_set_master_slave_mode(hw);
+ }
+
+ /**
+- * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link
++ * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock
+ * and downshift values are set also.
+ **/
+-s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
++s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+- if (phy->reset_disable) {
+- ret_val = 0;
+- goto out;
+- }
++ DEBUGFUNC("e1000_copper_link_setup_m88");
++
++ if (phy->reset_disable)
++ return E1000_SUCCESS;
+
+- /* Enable CRS on TX. This must be set for half-duplex operation. */
++ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+@@ -591,12 +1127,12 @@
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+- if (phy->disable_polarity_correction == 1)
++ if (phy->disable_polarity_correction)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ if (phy->revision < E1000_REVISION_4) {
+ /* Force TX_CLK in the Extended PHY Specific Control Register
+@@ -605,7 +1141,7 @@
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+@@ -617,42 +1153,43 @@
+ } else {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+- M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
++ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+ }
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+ }
+
+ /* Commit the changes. */
+- ret_val = igb_phy_sw_reset(hw);
++ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+- hw_dbg("Error committing the PHY changes\n");
+- goto out;
++ DEBUGOUT("Error committing the PHY changes\n");
++ return ret_val;
+ }
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link
++ * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's.
+ * Also enables and sets the downshift parameters.
+ **/
+-s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
++s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
++ DEBUGFUNC("e1000_copper_link_setup_m88_gen2");
++
+ if (phy->reset_disable)
+- return 0;
++ return E1000_SUCCESS;
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+@@ -694,7 +1231,7 @@
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+- if (phy->disable_polarity_correction == 1)
++ if (phy->disable_polarity_correction)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+ /* Enable downshift and setting it to X6 */
+@@ -705,9 +1242,9 @@
+ if (ret_val)
+ return ret_val;
+
+- ret_val = igb_phy_sw_reset(hw);
++ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+- hw_dbg("Error committing the PHY changes\n");
++ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+ }
+@@ -721,70 +1258,60 @@
+ return ret_val;
+
+ /* Commit the changes. */
+- ret_val = igb_phy_sw_reset(hw);
++ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+- hw_dbg("Error committing the PHY changes\n");
++ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+- ret_val = igb_set_master_slave_mode(hw);
++
++ ret_val = e1000_set_master_slave_mode(hw);
+ if (ret_val)
+ return ret_val;
+
+- return 0;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_copper_link_setup_igp - Setup igp PHY's for copper link
++ * e1000_copper_link_setup_igp - Setup igp PHY's for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ * igp PHY's.
+ **/
+-s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
++s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+- if (phy->reset_disable) {
+- ret_val = 0;
+- goto out;
+- }
++ DEBUGFUNC("e1000_copper_link_setup_igp");
++
++ if (phy->reset_disable)
++ return E1000_SUCCESS;
+
+- ret_val = phy->ops.reset(hw);
++ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+- hw_dbg("Error resetting the PHY.\n");
+- goto out;
++ DEBUGOUT("Error resetting the PHY.\n");
++ return ret_val;
+ }
+
+ /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+ * timeout issues when LFS is enabled.
+ */
+- msleep(100);
++ msec_delay(100);
+
+- /* The NVM settings will configure LPLU in D3 for
+- * non-IGP1 PHYs.
+- */
+- if (phy->type == e1000_phy_igp) {
+- /* disable lplu d3 during driver init */
+- if (phy->ops.set_d3_lplu_state)
+- ret_val = phy->ops.set_d3_lplu_state(hw, false);
++ /* disable lplu d0 during driver init */
++ if (hw->phy.ops.set_d0_lplu_state) {
++ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
+ if (ret_val) {
+- hw_dbg("Error Disabling LPLU D3\n");
+- goto out;
++ DEBUGOUT("Error Disabling LPLU D0\n");
++ return ret_val;
+ }
+ }
+-
+- /* disable lplu d0 during driver init */
+- ret_val = phy->ops.set_d0_lplu_state(hw, false);
+- if (ret_val) {
+- hw_dbg("Error Disabling LPLU D0\n");
+- goto out;
+- }
+ /* Configure mdi-mdix settings */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+@@ -802,7 +1329,7 @@
+ }
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ /* set auto-master slave resolution settings */
+ if (hw->mac.autoneg) {
+@@ -816,124 +1343,34 @@
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ /* Set auto Master/Slave resolution process */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ data &= ~CR_1000T_MS_ENABLE;
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+ if (ret_val)
+- goto out;
+- }
+-
+- ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+- if (ret_val)
+- goto out;
+-
+- /* load defaults for future use */
+- phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
+- ((data & CR_1000T_MS_VALUE) ?
+- e1000_ms_force_master :
+- e1000_ms_force_slave) :
+- e1000_ms_auto;
+-
+- switch (phy->ms_type) {
+- case e1000_ms_force_master:
+- data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+- break;
+- case e1000_ms_force_slave:
+- data |= CR_1000T_MS_ENABLE;
+- data &= ~(CR_1000T_MS_VALUE);
+- break;
+- case e1000_ms_auto:
+- data &= ~CR_1000T_MS_ENABLE;
+- default:
+- break;
++ return ret_val;
+ }
+- ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+- if (ret_val)
+- goto out;
+- }
+-
+-out:
+- return ret_val;
+-}
+-
+-/**
+- * igb_copper_link_autoneg - Setup/Enable autoneg for copper link
+- * @hw: pointer to the HW structure
+- *
+- * Performs initial bounds checking on autoneg advertisement parameter, then
+- * configure to advertise the full capability. Setup the PHY to autoneg
+- * and restart the negotiation process between the link partner. If
+- * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
+- **/
+-static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
+-{
+- struct e1000_phy_info *phy = &hw->phy;
+- s32 ret_val;
+- u16 phy_ctrl;
+-
+- /* Perform some bounds checking on the autoneg advertisement
+- * parameter.
+- */
+- phy->autoneg_advertised &= phy->autoneg_mask;
+-
+- /* If autoneg_advertised is zero, we assume it was not defaulted
+- * by the calling code so we set to advertise full capability.
+- */
+- if (phy->autoneg_advertised == 0)
+- phy->autoneg_advertised = phy->autoneg_mask;
+-
+- hw_dbg("Reconfiguring auto-neg advertisement params\n");
+- ret_val = igb_phy_setup_autoneg(hw);
+- if (ret_val) {
+- hw_dbg("Error Setting up Auto-Negotiation\n");
+- goto out;
+- }
+- hw_dbg("Restarting Auto-Neg\n");
+-
+- /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+- * the Auto Neg Restart bit in the PHY control register.
+- */
+- ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+- if (ret_val)
+- goto out;
+
+- phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+- ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+- if (ret_val)
+- goto out;
+-
+- /* Does the user want to wait for Auto-Neg to complete here, or
+- * check at a later time (for example, callback routine).
+- */
+- if (phy->autoneg_wait_to_complete) {
+- ret_val = igb_wait_autoneg(hw);
+- if (ret_val) {
+- hw_dbg("Error while waiting for autoneg to complete\n");
+- goto out;
+- }
++ ret_val = e1000_set_master_slave_mode(hw);
+ }
+
+- hw->mac.get_link_status = true;
+-
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_phy_setup_autoneg - Configure PHY for auto-negotiation
++ * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MII auto-neg advertisement register and/or the 1000T control
+@@ -941,26 +1378,28 @@
+ * return successful. Otherwise, setup advertisement and flow control to
+ * the appropriate values for the wanted auto-negotiation.
+ **/
+-static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
++static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg = 0;
+
++ DEBUGFUNC("e1000_phy_setup_autoneg");
++
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
+ &mii_1000t_ctrl_reg);
+ if (ret_val)
+- goto out;
++ return ret_val;
+ }
+
+ /* Need to parse both autoneg_advertised and fc and set up
+@@ -980,39 +1419,39 @@
+ NWAY_AR_10T_HD_CAPS);
+ mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+- hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
++ DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+- hw_dbg("Advertise 10mb Half duplex\n");
++ DEBUGOUT("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+- hw_dbg("Advertise 10mb Full duplex\n");
++ DEBUGOUT("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+- hw_dbg("Advertise 100mb Half duplex\n");
++ DEBUGOUT("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+- hw_dbg("Advertise 100mb Full duplex\n");
++ DEBUGOUT("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+- hw_dbg("Advertise 1000mb Half duplex request denied!\n");
++ DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+- hw_dbg("Advertise 1000mb Full duplex\n");
++ DEBUGOUT("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ }
+
+@@ -1029,68 +1468,126 @@
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+- * 3: Both Rx and TX flow control (symmetric) are enabled.
++ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+- /* Flow control (RX & TX) is completely disabled by a
++ /* Flow control (Rx & Tx) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case e1000_fc_rx_pause:
+- /* RX Flow control is enabled, and TX Flow control is
++ /* Rx Flow control is enabled, and Tx Flow control is
+ * disabled, by a software over-ride.
+ *
+ * Since there really isn't a way to advertise that we are
+- * capable of RX Pause ONLY, we will advertise that we
+- * support both symmetric and asymmetric RX PAUSE. Later
++ * capable of Rx Pause ONLY, we will advertise that we
++ * support both symmetric and asymmetric Rx PAUSE. Later
+ * (in e1000_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case e1000_fc_tx_pause:
+- /* TX Flow control is enabled, and RX Flow control is
++ /* Tx Flow control is enabled, and Rx Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case e1000_fc_full:
+- /* Flow control (both RX and TX) is enabled by a software
++ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+- hw_dbg("Flow control param set incorrectly\n");
+- ret_val = -E1000_ERR_CONFIG;
+- goto out;
++ DEBUGOUT("Flow control param set incorrectly\n");
++ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
++ DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+- if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+- ret_val = phy->ops.write_reg(hw,
+- PHY_1000T_CTRL,
++ if (phy->autoneg_mask & ADVERTISE_1000_FULL)
++ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
+ mii_1000t_ctrl_reg);
+- if (ret_val)
+- goto out;
++
++ return ret_val;
++}
++
++/**
++ * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
++ * @hw: pointer to the HW structure
++ *
++ * Performs initial bounds checking on autoneg advertisement parameter, then
++ * configure to advertise the full capability. Setup the PHY to autoneg
++ * and restart the negotiation process between the link partner. If
++ * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
++ **/
++static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 phy_ctrl;
++
++ DEBUGFUNC("e1000_copper_link_autoneg");
++
++ /* Perform some bounds checking on the autoneg advertisement
++ * parameter.
++ */
++ phy->autoneg_advertised &= phy->autoneg_mask;
++
++ /* If autoneg_advertised is zero, we assume it was not defaulted
++ * by the calling code so we set to advertise full capability.
++ */
++ if (!phy->autoneg_advertised)
++ phy->autoneg_advertised = phy->autoneg_mask;
++
++ DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
++ ret_val = e1000_phy_setup_autoneg(hw);
++ if (ret_val) {
++ DEBUGOUT("Error Setting up Auto-Negotiation\n");
++ return ret_val;
++ }
++ DEBUGOUT("Restarting Auto-Neg\n");
++
++ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
++ * the Auto Neg Restart bit in the PHY control register.
++ */
++ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
++ if (ret_val)
++ return ret_val;
++
++ phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
++ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
++ if (ret_val)
++ return ret_val;
++
++ /* Does the user want to wait for Auto-Neg to complete here, or
++ * check at a later time (for example, callback routine).
++ */
++ if (phy->autoneg_wait_to_complete) {
++ ret_val = e1000_wait_autoneg(hw);
++ if (ret_val) {
++ DEBUGOUT("Error while waiting for autoneg to complete\n");
++ return ret_val;
++ }
+ }
+
+-out:
++ hw->mac.get_link_status = true;
++
+ return ret_val;
+ }
+
+ /**
+- * igb_setup_copper_link - Configure copper link settings
++ * e1000_setup_copper_link_generic - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+@@ -1098,129 +1595,134 @@
+ * to configure collision distance and flow control are called. If link is
+ * not established, we return -E1000_ERR_PHY (-2).
+ **/
+-s32 igb_setup_copper_link(struct e1000_hw *hw)
++s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
+ {
+ s32 ret_val;
+ bool link;
+
++ DEBUGFUNC("e1000_setup_copper_link_generic");
++
+ if (hw->mac.autoneg) {
+ /* Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+- ret_val = igb_copper_link_autoneg(hw);
++ ret_val = e1000_copper_link_autoneg(hw);
+ if (ret_val)
+- goto out;
++ return ret_val;
+ } else {
+ /* PHY will be set to 10H, 10F, 100H or 100F
+ * depending on user settings.
+ */
+- hw_dbg("Forcing Speed and Duplex\n");
++ DEBUGOUT("Forcing Speed and Duplex\n");
+ ret_val = hw->phy.ops.force_speed_duplex(hw);
+ if (ret_val) {
+- hw_dbg("Error Forcing Speed and Duplex\n");
+- goto out;
++ DEBUGOUT("Error Forcing Speed and Duplex\n");
++ return ret_val;
+ }
+ }
+
+ /* Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+- ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
++ ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
++ &link);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ if (link) {
+- hw_dbg("Valid link established!!!\n");
+- igb_config_collision_dist(hw);
+- ret_val = igb_config_fc_after_link_up(hw);
++ DEBUGOUT("Valid link established!!!\n");
++ hw->mac.ops.config_collision_dist(hw);
++ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ } else {
+- hw_dbg("Unable to establish link!!!\n");
++ DEBUGOUT("Unable to establish link!!!\n");
+ }
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
++ * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Waits for link and returns
+ * successful if link up is successful, else -E1000_ERR_PHY (-2).
+ **/
+-s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
++s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
++ DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
++
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- igb_phy_force_speed_duplex_setup(hw, &phy_data);
++ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- hw_dbg("IGP PSCR: %X\n", phy_data);
++ DEBUGOUT1("IGP PSCR: %X\n", phy_data);
+
+- udelay(1);
++ usec_delay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+- hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
++ DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
+
+- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link);
++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
++ 100000, &link);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ if (!link)
+- hw_dbg("Link taking longer than expected.\n");
++ DEBUGOUT("Link taking longer than expected.\n");
+
+ /* Try once more */
+- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link);
+- if (ret_val)
+- goto out;
++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
++ 100000, &link);
+ }
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
++ * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Resets the PHY to commit the
+ * changes. If time expires while waiting for link up, we reset the DSP.
+- * After reset, TX_CLK and CRS on TX must be set. Return successful upon
++ * After reset, TX_CLK and CRS on Tx must be set. Return successful upon
+ * successful completion, else return corresponding error code.
+ **/
+-s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
++s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
++ DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
++
+ /* I210 and I211 devices support Auto-Crossover in forced operation. */
+ if (phy->type != e1000_phy_i210) {
+ /* Clear Auto-Crossover to force MDI manually. M88E1000
+@@ -1229,45 +1731,49 @@
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- hw_dbg("M88E1000 PSCR: %X\n", phy_data);
++ DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
+ }
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- igb_phy_force_speed_duplex_setup(hw, &phy_data);
++ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ /* Reset the phy to commit changes. */
+- ret_val = igb_phy_sw_reset(hw);
++ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ if (phy->autoneg_wait_to_complete) {
+- hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
++ DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n");
+
+- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
++ 100000, &link);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ if (!link) {
+ bool reset_dsp = true;
+
+ switch (hw->phy.id) {
+ case I347AT4_E_PHY_ID:
++ case M88E1340M_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
++ case M88E1543_E_PHY_ID:
++ case M88E1512_E_PHY_ID:
+ case I210_I_PHY_ID:
+ reset_dsp = false;
+ break;
+@@ -1276,9 +1782,10 @@
+ reset_dsp = false;
+ break;
+ }
+- if (!reset_dsp)
+- hw_dbg("Link taking longer than expected.\n");
+- else {
++
++ if (!reset_dsp) {
++ DEBUGOUT("Link taking longer than expected.\n");
++ } else {
+ /* We didn't get link.
+ * Reset the DSP and cross our fingers.
+ */
+@@ -1286,29 +1793,35 @@
+ M88E1000_PHY_PAGE_SELECT,
+ 0x001d);
+ if (ret_val)
+- goto out;
+- ret_val = igb_phy_reset_dsp(hw);
++ return ret_val;
++ ret_val = e1000_phy_reset_dsp_generic(hw);
+ if (ret_val)
+- goto out;
++ return ret_val;
+ }
+ }
+
+ /* Try once more */
+- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT,
+- 100000, &link);
++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
++ 100000, &link);
+ if (ret_val)
+- goto out;
++ return ret_val;
+ }
+
+- if (hw->phy.type != e1000_phy_m88 ||
+- hw->phy.id == I347AT4_E_PHY_ID ||
+- hw->phy.id == M88E1112_E_PHY_ID ||
+- hw->phy.id == I210_I_PHY_ID)
+- goto out;
++ if (hw->phy.type != e1000_phy_m88)
++ return E1000_SUCCESS;
+
++ if (hw->phy.id == I347AT4_E_PHY_ID ||
++ hw->phy.id == M88E1340M_E_PHY_ID ||
++ hw->phy.id == M88E1112_E_PHY_ID)
++ return E1000_SUCCESS;
++ if (hw->phy.id == I210_I_PHY_ID)
++ return E1000_SUCCESS;
++ if ((hw->phy.id == M88E1543_E_PHY_ID) ||
++ (hw->phy.id == M88E1512_E_PHY_ID))
++ return E1000_SUCCESS;
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ /* Resetting the phy means we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock from
+@@ -1317,24 +1830,88 @@
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ /* In addition, we must re-enable CRS on Tx for both half and full
+ * duplex.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
++ * igb_e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
++ * @hw: pointer to the HW structure
++ *
++ * Forces the speed and duplex settings of the PHY.
++ * This is a function pointer entry point only called by
++ * PHY setup routines.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 data;
++ bool link;
++
++ DEBUGFUNC("igb_e1000_phy_force_speed_duplex_ife");
++
++ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
++ if (ret_val)
++ return ret_val;
++
++ e1000_phy_force_speed_duplex_setup(hw, &data);
++
++ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
++ if (ret_val)
++ return ret_val;
++
++ /* Disable MDI-X support for 10/100 */
++ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
++ if (ret_val)
++ return ret_val;
++
++ data &= ~IFE_PMC_AUTO_MDIX;
++ data &= ~IFE_PMC_FORCE_MDIX;
++
++ ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
++ if (ret_val)
++ return ret_val;
++
++ DEBUGOUT1("IFE PMC: %X\n", data);
++
++ usec_delay(1);
++
++ if (phy->autoneg_wait_to_complete) {
++ DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
++
++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
++ 100000, &link);
++ if (ret_val)
++ return ret_val;
++
++ if (!link)
++ DEBUGOUT("Link taking longer than expected.\n");
++
++ /* Try once more */
++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
++ 100000, &link);
++ if (ret_val)
++ return ret_val;
++ }
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ * @hw: pointer to the HW structure
+ * @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+@@ -1345,17 +1922,18 @@
+ * caller must write to the PHY_CONTROL register for these settings to
+ * take affect.
+ **/
+-static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+- u16 *phy_ctrl)
++void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+ {
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 ctrl;
+
++ DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
++
+ /* Turn off flow control when forcing speed/duplex */
+ hw->fc.current_mode = e1000_fc_none;
+
+ /* Force speed/duplex on the mac */
+- ctrl = rd32(E1000_CTRL);
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~E1000_CTRL_SPD_SEL;
+
+@@ -1369,33 +1947,32 @@
+ if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+ ctrl &= ~E1000_CTRL_FD;
+ *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+- hw_dbg("Half Duplex\n");
++ DEBUGOUT("Half Duplex\n");
+ } else {
+ ctrl |= E1000_CTRL_FD;
+ *phy_ctrl |= MII_CR_FULL_DUPLEX;
+- hw_dbg("Full Duplex\n");
++ DEBUGOUT("Full Duplex\n");
+ }
+
+ /* Forcing 10mb or 100mb? */
+ if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+ ctrl |= E1000_CTRL_SPD_100;
+ *phy_ctrl |= MII_CR_SPEED_100;
+- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+- hw_dbg("Forcing 100mb\n");
++ *phy_ctrl &= ~MII_CR_SPEED_1000;
++ DEBUGOUT("Forcing 100mb\n");
+ } else {
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+- *phy_ctrl |= MII_CR_SPEED_10;
+ *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+- hw_dbg("Forcing 10mb\n");
++ DEBUGOUT("Forcing 10mb\n");
+ }
+
+- igb_config_collision_dist(hw);
++ hw->mac.ops.config_collision_dist(hw);
+
+- wr32(E1000_CTRL, ctrl);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ }
+
+ /**
+- * igb_set_d3_lplu_state - Sets low power link up state for D3
++ * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+@@ -1408,25 +1985,27 @@
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+-s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
++s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+- s32 ret_val = 0;
++ s32 ret_val;
+ u16 data;
+
+- if (!(hw->phy.ops.read_reg))
+- goto out;
++ DEBUGFUNC("e1000_set_d3_lplu_state_generic");
++
++ if (!hw->phy.ops.read_reg)
++ return E1000_SUCCESS;
+
+ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ if (!active) {
+ data &= ~IGP02E1000_PM_D3_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+@@ -1437,176 +2016,219 @@
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+- IGP01E1000_PHY_PORT_CONFIG,
+- &data);
++ IGP01E1000_PHY_PORT_CONFIG,
++ &data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+ }
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= IGP02E1000_PM_D3_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+- data);
++ data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ }
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_check_downshift - Checks whether a downshift in speed occurred
++ * e1000_check_downshift_generic - Checks whether a downshift in speed occurred
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * A downshift is detected by querying the PHY link health.
+ **/
+-s32 igb_check_downshift(struct e1000_hw *hw)
++s32 e1000_check_downshift_generic(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, offset, mask;
+
++ DEBUGFUNC("e1000_check_downshift_generic");
++
+ switch (phy->type) {
+ case e1000_phy_i210:
+ case e1000_phy_m88:
+ case e1000_phy_gg82563:
+- offset = M88E1000_PHY_SPEC_STATUS;
+- mask = M88E1000_PSSR_DOWNSHIFT;
++ offset = M88E1000_PHY_SPEC_STATUS;
++ mask = M88E1000_PSSR_DOWNSHIFT;
+ break;
+ case e1000_phy_igp_2:
+- case e1000_phy_igp:
+ case e1000_phy_igp_3:
+- offset = IGP01E1000_PHY_LINK_HEALTH;
+- mask = IGP01E1000_PLHR_SS_DOWNGRADE;
++ offset = IGP01E1000_PHY_LINK_HEALTH;
++ mask = IGP01E1000_PLHR_SS_DOWNGRADE;
+ break;
+ default:
+ /* speed downshift not supported */
+ phy->speed_downgraded = false;
+- ret_val = 0;
+- goto out;
++ return E1000_SUCCESS;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+ if (!ret_val)
+- phy->speed_downgraded = (phy_data & mask) ? true : false;
++ phy->speed_downgraded = !!(phy_data & mask);
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_check_polarity_m88 - Checks the polarity.
++ * igb_e1000_check_polarity_m88 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+-s32 igb_check_polarity_m88(struct e1000_hw *hw)
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_check_polarity_m88(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
++ DEBUGFUNC("igb_e1000_check_polarity_m88");
++
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+ if (!ret_val)
+- phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
+- ? e1000_rev_polarity_reversed
+- : e1000_rev_polarity_normal;
++ phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
++ ? e1000_rev_polarity_reversed
++ : e1000_rev_polarity_normal);
++
++ return ret_val;
++}
++
++/**
++ * igb_e1000_check_polarity_igp - Checks the polarity.
++ * @hw: pointer to the HW structure
++ *
++ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
++ *
++ * Polarity is determined based on the PHY port status register, and the
++ * current speed (since there is no polarity at 100Mbps).
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_check_polarity_igp(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 data, offset, mask;
++
++ DEBUGFUNC("igb_e1000_check_polarity_igp");
++
++ /* Polarity is determined based on the speed of
++ * our connection.
++ */
++ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
++ if (ret_val)
++ return ret_val;
++
++ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
++ IGP01E1000_PSSR_SPEED_1000MBPS) {
++ offset = IGP01E1000_PHY_PCS_INIT_REG;
++ mask = IGP01E1000_PHY_POLARITY_MASK;
++ } else {
++ /* This really only applies to 10Mbps since
++ * there is no polarity for 100Mbps (always 0).
++ */
++ offset = IGP01E1000_PHY_PORT_STATUS;
++ mask = IGP01E1000_PSSR_POLARITY_REVERSED;
++ }
++
++ ret_val = phy->ops.read_reg(hw, offset, &data);
++
++ if (!ret_val)
++ phy->cable_polarity = ((data & mask)
++ ? e1000_rev_polarity_reversed
++ : e1000_rev_polarity_normal);
+
+ return ret_val;
+ }
+
+ /**
+- * igb_check_polarity_igp - Checks the polarity.
++ * igb_e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ * @hw: pointer to the HW structure
+ *
+- * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+- *
+- * Polarity is determined based on the PHY port status register, and the
+- * current speed (since there is no polarity at 100Mbps).
++ * Polarity is determined on the polarity reversal feature being enabled.
+ **/
+-static s32 igb_check_polarity_igp(struct e1000_hw *hw)
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_check_polarity_ife(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+- u16 data, offset, mask;
++ u16 phy_data, offset, mask;
+
+- /* Polarity is determined based on the speed of
+- * our connection.
+- */
+- ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+- if (ret_val)
+- goto out;
++ DEBUGFUNC("igb_e1000_check_polarity_ife");
+
+- if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+- IGP01E1000_PSSR_SPEED_1000MBPS) {
+- offset = IGP01E1000_PHY_PCS_INIT_REG;
+- mask = IGP01E1000_PHY_POLARITY_MASK;
++ /* Polarity is determined based on the reversal feature being enabled.
++ */
++ if (phy->polarity_correction) {
++ offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
++ mask = IFE_PESC_POLARITY_REVERSED;
+ } else {
+- /* This really only applies to 10Mbps since
+- * there is no polarity for 100Mbps (always 0).
+- */
+- offset = IGP01E1000_PHY_PORT_STATUS;
+- mask = IGP01E1000_PSSR_POLARITY_REVERSED;
++ offset = IFE_PHY_SPECIAL_CONTROL;
++ mask = IFE_PSC_FORCE_POLARITY;
+ }
+
+- ret_val = phy->ops.read_reg(hw, offset, &data);
++ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+ if (!ret_val)
+- phy->cable_polarity = (data & mask)
+- ? e1000_rev_polarity_reversed
+- : e1000_rev_polarity_normal;
++ phy->cable_polarity = ((phy_data & mask)
++ ? e1000_rev_polarity_reversed
++ : e1000_rev_polarity_normal);
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_wait_autoneg - Wait for auto-neg completion
++ * e1000_wait_autoneg - Wait for auto-neg completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for auto-negotiation to complete or for the auto-negotiation time
+ * limit to expire, which ever happens first.
+ **/
+-static s32 igb_wait_autoneg(struct e1000_hw *hw)
++static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+ {
+- s32 ret_val = 0;
++ s32 ret_val = E1000_SUCCESS;
+ u16 i, phy_status;
+
++ DEBUGFUNC("e1000_wait_autoneg");
++
++ if (!hw->phy.ops.read_reg)
++ return E1000_SUCCESS;
++
+ /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+@@ -1617,7 +2239,7 @@
+ break;
+ if (phy_status & MII_SR_AUTONEG_COMPLETE)
+ break;
+- msleep(100);
++ msec_delay(100);
+ }
+
+ /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+@@ -1627,7 +2249,7 @@
+ }
+
+ /**
+- * igb_phy_has_link - Polls PHY for link
++ * e1000_phy_has_link_generic - Polls PHY for link
+ * @hw: pointer to the HW structure
+ * @iterations: number of times to poll for link
+ * @usec_interval: delay between polling attempts
+@@ -1635,27 +2257,32 @@
+ *
+ * Polls the PHY status register for link, 'iterations' number of times.
+ **/
+-s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
+- u32 usec_interval, bool *success)
++s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
++ u32 usec_interval, bool *success)
+ {
+- s32 ret_val = 0;
++ s32 ret_val = E1000_SUCCESS;
+ u16 i, phy_status;
+
++ DEBUGFUNC("e1000_phy_has_link_generic");
++
++ if (!hw->phy.ops.read_reg)
++ return E1000_SUCCESS;
++
+ for (i = 0; i < iterations; i++) {
+ /* Some PHYs require the PHY_STATUS register to be read
+ * twice due to the link bit being sticky. No harm doing
+ * it across the board.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+- if (ret_val && usec_interval > 0) {
++ if (ret_val) {
+ /* If the first read fails, another entity may have
+ * ownership of the resources, wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+ if (usec_interval >= 1000)
+- mdelay(usec_interval/1000);
++ msec_delay(usec_interval/1000);
+ else
+- udelay(usec_interval);
++ usec_delay(usec_interval);
+ }
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+@@ -1663,18 +2290,18 @@
+ if (phy_status & MII_SR_LINK_STATUS)
+ break;
+ if (usec_interval >= 1000)
+- mdelay(usec_interval/1000);
++ msec_delay(usec_interval/1000);
+ else
+- udelay(usec_interval);
++ usec_delay(usec_interval);
+ }
+
+- *success = (i < iterations) ? true : false;
++ *success = (i < iterations);
+
+ return ret_val;
+ }
+
+ /**
+- * igb_get_cable_length_m88 - Determine cable length for m88 PHY
++ * e1000_get_cable_length_m88 - Determine cable length for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY specific status register to retrieve the cable length
+@@ -1688,37 +2315,40 @@
+ * 3 110 - 140 meters
+ * 4 > 140 meters
+ **/
+-s32 igb_get_cable_length_m88(struct e1000_hw *hw)
++s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, index;
+
++ DEBUGFUNC("e1000_get_cable_length_m88");
++
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+- ret_val = -E1000_ERR_PHY;
+- goto out;
+- }
++ index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
++ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
++
++ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
++ return -E1000_ERR_PHY;
+
+ phy->min_cable_length = e1000_m88_cable_length_table[index];
+ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+-s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
++s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+- u16 phy_data, phy_data2, index, default_page, is_cm;
++ u16 phy_data, phy_data2, is_cm;
++ u16 index, default_page;
++
++ DEBUGFUNC("e1000_get_cable_length_m88_gen2");
+
+ switch (hw->phy.id) {
+ case I210_I_PHY_ID:
+@@ -1743,27 +2373,29 @@
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+ break;
+ case M88E1543_E_PHY_ID:
++ case M88E1512_E_PHY_ID:
++ case M88E1340M_E_PHY_ID:
+ case I347AT4_E_PHY_ID:
+ /* Remember the original page select and set it to 7 */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+ &default_page);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ /* Get cable length from PHY Cable Diagnostics Control Reg */
+ ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
+ &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ /* Check if the unit of cable length is meters or cm */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+@@ -1772,34 +2404,34 @@
+ phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+
+- /* Reset the page selec to its original value */
++ /* Reset the page select to its original value */
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+ default_page);
+ if (ret_val)
+- goto out;
++ return ret_val;
+ break;
++
+ case M88E1112_E_PHY_ID:
+ /* Remember the original page select and set it to 5 */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+ &default_page);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
+ &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+- ret_val = -E1000_ERR_PHY;
+- goto out;
+- }
++
++ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
++ return -E1000_ERR_PHY;
+
+ phy->min_cable_length = e1000_m88_cable_length_table[index];
+ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+@@ -1811,20 +2443,18 @@
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+ default_page);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ break;
+ default:
+- ret_val = -E1000_ERR_PHY;
+- goto out;
++ return -E1000_ERR_PHY;
+ }
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY
++ * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ * @hw: pointer to the HW structure
+ *
+ * The automatic gain control (agc) normalizes the amplitude of the
+@@ -1834,10 +2464,10 @@
+ * into a lookup table to obtain the approximate cable length
+ * for each channel.
+ **/
+-s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
++s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+- s32 ret_val = 0;
++ s32 ret_val;
+ u16 phy_data, i, agc_value = 0;
+ u16 cur_agc_index, max_agc_index = 0;
+ u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+@@ -1848,26 +2478,26 @@
+ IGP02E1000_PHY_AGC_D
+ };
+
++ DEBUGFUNC("e1000_get_cable_length_igp_2");
++
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ /* Getting bits 15:9, which represent the combination of
+ * coarse and fine gain values. The result is a number
+ * that can be put into the lookup table to obtain the
+ * approximate cable length.
+ */
+- cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+- IGP02E1000_AGC_LENGTH_MASK;
++ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
++ IGP02E1000_AGC_LENGTH_MASK);
+
+ /* Array index bound check. */
+ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+- (cur_agc_index == 0)) {
+- ret_val = -E1000_ERR_PHY;
+- goto out;
+- }
++ (cur_agc_index == 0))
++ return -E1000_ERR_PHY;
+
+ /* Remove min & max AGC values from calculation. */
+ if (e1000_igp_2_cable_length_table[min_agc_index] >
+@@ -1885,18 +2515,17 @@
+ agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+ /* Calculate cable length with the error range of +/- 10 meters. */
+- phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+- (agc_value - IGP02E1000_AGC_RANGE) : 0;
++ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
++ (agc_value - IGP02E1000_AGC_RANGE) : 0);
+ phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_get_phy_info_m88 - Retrieve PHY information
++ * e1000_get_phy_info_m88 - Retrieve PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Valid for only copper links. Read the PHY status register (sticky read)
+@@ -1905,54 +2534,54 @@
+ * special status register to determine MDI/MDIx and current speed. If
+ * speed is 1000, then determine cable length, local and remote receiver.
+ **/
+-s32 igb_get_phy_info_m88(struct e1000_hw *hw)
++s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
++ DEBUGFUNC("e1000_get_phy_info_m88");
++
+ if (phy->media_type != e1000_media_type_copper) {
+- hw_dbg("Phy info is only valid for copper media\n");
+- ret_val = -E1000_ERR_CONFIG;
+- goto out;
++ DEBUGOUT("Phy info is only valid for copper media\n");
++ return -E1000_ERR_CONFIG;
+ }
+
+- ret_val = igb_phy_has_link(hw, 1, 0, &link);
++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ if (!link) {
+- hw_dbg("Phy info is only valid if link is up\n");
+- ret_val = -E1000_ERR_CONFIG;
+- goto out;
++ DEBUGOUT("Phy info is only valid if link is up\n");
++ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
+- ? true : false;
++ phy->polarity_correction = !!(phy_data &
++ M88E1000_PSCR_POLARITY_REVERSAL);
+
+- ret_val = igb_check_polarity_m88(hw);
++ ret_val = igb_e1000_check_polarity_m88(hw);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
++ phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
+
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+- ret_val = phy->ops.get_cable_length(hw);
++ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+@@ -1968,12 +2597,11 @@
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_get_phy_info_igp - Retrieve igp PHY information
++ * e1000_get_phy_info_igp - Retrieve igp PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+@@ -1981,44 +2609,45 @@
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine on the cable length, local and remote receiver.
+ **/
+-s32 igb_get_phy_info_igp(struct e1000_hw *hw)
++s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+- ret_val = igb_phy_has_link(hw, 1, 0, &link);
++ DEBUGFUNC("e1000_get_phy_info_igp");
++
++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ if (!link) {
+- hw_dbg("Phy info is only valid if link is up\n");
+- ret_val = -E1000_ERR_CONFIG;
+- goto out;
++ DEBUGOUT("Phy info is only valid if link is up\n");
++ return -E1000_ERR_CONFIG;
+ }
+
+ phy->polarity_correction = true;
+
+- ret_val = igb_check_polarity_igp(hw);
++ ret_val = igb_e1000_check_polarity_igp(hw);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false;
++ phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX);
+
+ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ ret_val = phy->ops.get_cable_length(hw);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+@@ -2033,42 +2662,97 @@
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_phy_sw_reset - PHY software reset
++ * igb_e1000_get_phy_info_ife - Retrieves various IFE PHY states
++ * @hw: pointer to the HW structure
++ *
++ * Populates "phy" structure with various feature states.
++ **/
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_get_phy_info_ife(struct e1000_hw *hw)
++{
++ struct e1000_phy_info *phy = &hw->phy;
++ s32 ret_val;
++ u16 data;
++ bool link;
++
++ DEBUGFUNC("igb_e1000_get_phy_info_ife");
++
++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
++ if (ret_val)
++ return ret_val;
++
++ if (!link) {
++ DEBUGOUT("Phy info is only valid if link is up\n");
++ return -E1000_ERR_CONFIG;
++ }
++
++ ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
++ if (ret_val)
++ return ret_val;
++ phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
++
++ if (phy->polarity_correction) {
++ ret_val = igb_e1000_check_polarity_ife(hw);
++ if (ret_val)
++ return ret_val;
++ } else {
++ /* Polarity is forced */
++ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
++ ? e1000_rev_polarity_reversed
++ : e1000_rev_polarity_normal);
++ }
++
++ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
++ if (ret_val)
++ return ret_val;
++
++ phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS);
++
++ /* The following parameters are undefined for 10/100 operation. */
++ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
++ phy->local_rx = e1000_1000t_rx_status_undefined;
++ phy->remote_rx = e1000_1000t_rx_status_undefined;
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_phy_sw_reset_generic - PHY software reset
+ * @hw: pointer to the HW structure
+ *
+ * Does a software reset of the PHY by reading the PHY control register and
+ * setting/write the control register reset bit to the PHY.
+ **/
+-s32 igb_phy_sw_reset(struct e1000_hw *hw)
++s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
+ {
+- s32 ret_val = 0;
++ s32 ret_val;
+ u16 phy_ctrl;
+
+- if (!(hw->phy.ops.read_reg))
+- goto out;
++ DEBUGFUNC("e1000_phy_sw_reset_generic");
++
++ if (!hw->phy.ops.read_reg)
++ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ phy_ctrl |= MII_CR_RESET;
+ ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- udelay(1);
++ usec_delay(1);
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_phy_hw_reset - PHY hardware reset
++ * e1000_phy_hw_reset_generic - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire
+@@ -2076,50 +2760,65 @@
+ * bit in the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ **/
+-s32 igb_phy_hw_reset(struct e1000_hw *hw)
++s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+- s32 ret_val;
++ s32 ret_val;
+ u32 ctrl;
+
+- ret_val = igb_check_reset_block(hw);
+- if (ret_val) {
+- ret_val = 0;
+- goto out;
++ DEBUGFUNC("e1000_phy_hw_reset_generic");
++
++ if (phy->ops.check_reset_block) {
++ ret_val = phy->ops.check_reset_block(hw);
++ if (ret_val)
++ return E1000_SUCCESS;
+ }
+
+ ret_val = phy->ops.acquire(hw);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- ctrl = rd32(E1000_CTRL);
+- wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+- wrfl();
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
++ E1000_WRITE_FLUSH(hw);
+
+- udelay(phy->reset_delay_us);
++ usec_delay(phy->reset_delay_us);
+
+- wr32(E1000_CTRL, ctrl);
+- wrfl();
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++ E1000_WRITE_FLUSH(hw);
+
+- udelay(150);
++ usec_delay(150);
+
+ phy->ops.release(hw);
+
+- ret_val = phy->ops.get_cfg_done(hw);
++ return phy->ops.get_cfg_done(hw);
++}
+
+-out:
+- return ret_val;
++/**
++ * e1000_get_cfg_done_generic - Generic configuration done
++ * @hw: pointer to the HW structure
++ *
++ * Generic function to wait 10 milli-seconds for configuration to complete
++ * and return success.
++ **/
++s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw)
++{
++ DEBUGFUNC("e1000_get_cfg_done_generic");
++
++ msec_delay_irq(10);
++
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_phy_init_script_igp3 - Inits the IGP3 PHY
++ * e1000_phy_init_script_igp3 - Inits the IGP3 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initializes a Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+-s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
++s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
+ {
+- hw_dbg("Running IGP 3 PHY init script\n");
++ DEBUGOUT("Running IGP 3 PHY init script\n");
+
+ /* PHY init IGP 3 */
+ /* Enable rise/fall, 10-mode work in class-A */
+@@ -2130,7 +2829,7 @@
+ hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
+ /* Increase Hybrid poly bias */
+ hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
+- /* Add 4% to TX amplitude in Giga mode */
++ /* Add 4% to Tx amplitude in Gig mode */
+ hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
+ /* Disable trimming (TTT) */
+ hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
+@@ -2191,17 +2890,106 @@
+ /* Restart AN, Speed selection is 1000 */
+ hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
+
+- return 0;
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_get_phy_type_from_id - Get PHY type from id
++ * @phy_id: phy_id read from the phy
++ *
++ * Returns the phy type from the id.
++ **/
++enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
++{
++ enum e1000_phy_type phy_type = e1000_phy_unknown;
++
++ switch (phy_id) {
++ case M88E1000_I_PHY_ID:
++ case M88E1000_E_PHY_ID:
++ case M88E1111_I_PHY_ID:
++ case M88E1011_I_PHY_ID:
++ case M88E1543_E_PHY_ID:
++ case M88E1512_E_PHY_ID:
++ case I347AT4_E_PHY_ID:
++ case M88E1112_E_PHY_ID:
++ case M88E1340M_E_PHY_ID:
++ phy_type = e1000_phy_m88;
++ break;
++ case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
++ phy_type = e1000_phy_igp_2;
++ break;
++ case GG82563_E_PHY_ID:
++ phy_type = e1000_phy_gg82563;
++ break;
++ case IGP03E1000_E_PHY_ID:
++ phy_type = e1000_phy_igp_3;
++ break;
++ case IFE_E_PHY_ID:
++ case IFE_PLUS_E_PHY_ID:
++ case IFE_C_E_PHY_ID:
++ phy_type = e1000_phy_ife;
++ break;
++ case I82580_I_PHY_ID:
++ phy_type = e1000_phy_82580;
++ break;
++ case I210_I_PHY_ID:
++ phy_type = e1000_phy_i210;
++ break;
++ default:
++ phy_type = e1000_phy_unknown;
++ break;
++ }
++ return phy_type;
++}
++
++/**
++ * e1000_determine_phy_address - Determines PHY address.
++ * @hw: pointer to the HW structure
++ *
++ * This uses a trial and error method to loop through possible PHY
++ * addresses. It tests each by reading the PHY ID registers and
++ * checking for a match.
++ **/
++s32 e1000_determine_phy_address(struct e1000_hw *hw)
++{
++ u32 phy_addr = 0;
++ u32 i;
++ enum e1000_phy_type phy_type = e1000_phy_unknown;
++
++ hw->phy.id = phy_type;
++
++ for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
++ hw->phy.addr = phy_addr;
++ i = 0;
++
++ do {
++ e1000_get_phy_id(hw);
++ phy_type = e1000_get_phy_type_from_id(hw->phy.id);
++
++ /* If phy_type is valid, break - we found our
++ * PHY address
++ */
++ if (phy_type != e1000_phy_unknown)
++ return E1000_SUCCESS;
++
++ msec_delay(1);
++ i++;
++ } while (i < 10);
++ }
++
++ return -E1000_ERR_PHY_TYPE;
+ }
+
+ /**
+- * igb_power_up_phy_copper - Restore copper link in case of PHY power down
++ * igb_e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+- * driver unload, restore the link to previous settings.
++ * driver unload, or wake on lan is not enabled, restore the link to previous
++ * settings.
+ **/
+-void igb_power_up_phy_copper(struct e1000_hw *hw)
++/* Changed name, duplicated with e1000 */
++void igb_e1000_power_up_phy_copper(struct e1000_hw *hw)
+ {
+ u16 mii_reg = 0;
+
+@@ -2212,13 +3000,15 @@
+ }
+
+ /**
+- * igb_power_down_phy_copper - Power down copper PHY
++ * igb_e1000_power_down_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+- * Power down PHY to save power when interface is down and wake on lan
+- * is not enabled.
++ * In the case of a PHY power down to save power, or to turn off link during a
++ * driver unload, or wake on lan is not enabled, restore the link to previous
++ * settings.
+ **/
+-void igb_power_down_phy_copper(struct e1000_hw *hw)
++/* Changed name, duplicated with e1000 */
++void igb_e1000_power_down_phy_copper(struct e1000_hw *hw)
+ {
+ u16 mii_reg = 0;
+
+@@ -2226,98 +3016,85 @@
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+- usleep_range(1000, 2000);
++ msec_delay(1);
+ }
+
+ /**
+- * igb_check_polarity_82580 - Checks the polarity.
++ * igb_e1000_check_polarity_82577 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+-static s32 igb_check_polarity_82580(struct e1000_hw *hw)
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_check_polarity_82577(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
++ DEBUGFUNC("igb_e1000_check_polarity_82577");
+
+- ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
++ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+
+ if (!ret_val)
+- phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY)
+- ? e1000_rev_polarity_reversed
+- : e1000_rev_polarity_normal;
++ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
++ ? e1000_rev_polarity_reversed
++ : e1000_rev_polarity_normal);
+
+ return ret_val;
+ }
+
+ /**
+- * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY
++ * igb_e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
+ * @hw: pointer to the HW structure
+ *
+- * Calls the PHY setup function to force speed and duplex. Clears the
+- * auto-crossover to force MDI manually. Waits for link and returns
+- * successful if link up is successful, else -E1000_ERR_PHY (-2).
++ * Calls the PHY setup function to force speed and duplex.
+ **/
+-s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
++ DEBUGFUNC("igb_e1000_phy_force_speed_duplex_82577");
++
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- igb_phy_force_speed_duplex_setup(hw, &phy_data);
++ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+- goto out;
+-
+- /* Clear Auto-Crossover to force MDI manually. 82580 requires MDI
+- * forced whenever speed and duplex are forced.
+- */
+- ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+- if (ret_val)
+- goto out;
+-
+- phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
+-
+- ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
+- if (ret_val)
+- goto out;
+-
+- hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data);
++ return ret_val;
+
+- udelay(1);
++ usec_delay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+- hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n");
++ DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n");
+
+- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
++ 100000, &link);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ if (!link)
+- hw_dbg("Link taking longer than expected.\n");
++ DEBUGOUT("Link taking longer than expected.\n");
+
+ /* Try once more */
+- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
+- if (ret_val)
+- goto out;
++ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
++ 100000, &link);
+ }
+
+-out:
+ return ret_val;
+ }
+
+ /**
+- * igb_get_phy_info_82580 - Retrieve I82580 PHY information
++ * igb_e1000_get_phy_info_82577 - Retrieve I82577 PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+@@ -2325,44 +3102,46 @@
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine on the cable length, local and remote receiver.
+ **/
+-s32 igb_get_phy_info_82580(struct e1000_hw *hw)
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_get_phy_info_82577(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+- ret_val = igb_phy_has_link(hw, 1, 0, &link);
++ DEBUGFUNC("igb_e1000_get_phy_info_82577");
++
++ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ if (!link) {
+- hw_dbg("Phy info is only valid if link is up\n");
+- ret_val = -E1000_ERR_CONFIG;
+- goto out;
++ DEBUGOUT("Phy info is only valid if link is up\n");
++ return -E1000_ERR_CONFIG;
+ }
+
+ phy->polarity_correction = true;
+
+- ret_val = igb_check_polarity_82580(hw);
++ ret_val = igb_e1000_check_polarity_82577(hw);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
++ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false;
++ phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
+
+- if ((data & I82580_PHY_STATUS2_SPEED_MASK) ==
+- I82580_PHY_STATUS2_SPEED_1000MBPS) {
++ if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
++ I82577_PHY_STATUS2_SPEED_1000MBPS) {
+ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+@@ -2377,63 +3156,65 @@
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_get_cable_length_82580 - Determine cable length for 82580 PHY
++ * igb_e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies result is valid before
+ * placing it in the phy_cable_length field.
+ **/
+-s32 igb_get_cable_length_82580(struct e1000_hw *hw)
++/* Changed name, duplicated with e1000 */
++s32 igb_e1000_get_cable_length_82577(struct e1000_hw *hw)
+ {
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, length;
+
+- ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data);
++ DEBUGFUNC("igb_e1000_get_cable_length_82577");
++
++ ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+ if (ret_val)
+- goto out;
++ return ret_val;
+
+- length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
+- I82580_DSTATUS_CABLE_LENGTH_SHIFT;
++ length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
++ I82577_DSTATUS_CABLE_LENGTH_SHIFT);
+
+ if (length == E1000_CABLE_LENGTH_UNDEFINED)
+- ret_val = -E1000_ERR_PHY;
++ return -E1000_ERR_PHY;
+
+ phy->cable_length = length;
+
+-out:
+- return ret_val;
++ return E1000_SUCCESS;
+ }
+
+ /**
+- * igb_write_phy_reg_gs40g - Write GS40G PHY register
++ * e1000_write_phy_reg_gs40g - Write GS40G PHY register
+ * @hw: pointer to the HW structure
+- * @offset: lower half is register offset to write to
+- * upper half is page to use.
++ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+-s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
++s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
+ {
+ s32 ret_val;
+ u16 page = offset >> GS40G_PAGE_SHIFT;
+
++ DEBUGFUNC("e1000_write_phy_reg_gs40g");
++
+ offset = offset & GS40G_OFFSET_MASK;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+- ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
++ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+ if (ret_val)
+ goto release;
+- ret_val = igb_write_phy_reg_mdic(hw, offset, data);
++ ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
+
+ release:
+ hw->phy.ops.release(hw);
+@@ -2441,7 +3222,7 @@
+ }
+
+ /**
+- * igb_read_phy_reg_gs40g - Read GS40G PHY register
++ * e1000_read_phy_reg_gs40g - Read GS40G PHY register
+ * @hw: pointer to the HW structure
+ * @offset: lower half is register offset to read to
+ * upper half is page to use.
+@@ -2450,20 +3231,22 @@
+ * Acquires semaphore, if necessary, then reads the data in the PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+-s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
++s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
+ {
+ s32 ret_val;
+ u16 page = offset >> GS40G_PAGE_SHIFT;
+
++ DEBUGFUNC("e1000_read_phy_reg_gs40g");
++
+ offset = offset & GS40G_OFFSET_MASK;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+- ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
++ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+ if (ret_val)
+ goto release;
+- ret_val = igb_read_phy_reg_mdic(hw, offset, data);
++ ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
+
+ release:
+ hw->phy.ops.release(hw);
+@@ -2471,41 +3254,156 @@
+ }
+
+ /**
+- * igb_set_master_slave_mode - Setup PHY for Master/slave mode
++ * e1000_read_phy_reg_mphy - Read mPHY control register
+ * @hw: pointer to the HW structure
++ * @address: address to be read
++ * @data: pointer to the read data
+ *
+- * Sets up Master/slave mode
++ * Reads the mPHY control register in the PHY at offset and stores the
++ * information read to data.
+ **/
+-static s32 igb_set_master_slave_mode(struct e1000_hw *hw)
++s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
+ {
+- s32 ret_val;
+- u16 phy_data;
++ u32 mphy_ctrl = 0;
++ bool locked = false;
++ bool ready;
+
+- /* Resolve Master/Slave mode */
+- ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data);
+- if (ret_val)
+- return ret_val;
++ DEBUGFUNC("e1000_read_phy_reg_mphy");
+
+- /* load defaults for future use */
+- hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
+- ((phy_data & CR_1000T_MS_VALUE) ?
+- e1000_ms_force_master :
+- e1000_ms_force_slave) : e1000_ms_auto;
++ /* Check if mPHY is ready to read/write operations */
++ ready = e1000_is_mphy_ready(hw);
++ if (!ready)
++ return -E1000_ERR_PHY;
+
+- switch (hw->phy.ms_type) {
+- case e1000_ms_force_master:
+- phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+- break;
+- case e1000_ms_force_slave:
+- phy_data |= CR_1000T_MS_ENABLE;
+- phy_data &= ~(CR_1000T_MS_VALUE);
+- break;
+- case e1000_ms_auto:
+- phy_data &= ~CR_1000T_MS_ENABLE;
+- /* fall-through */
+- default:
++ /* Check if mPHY access is disabled and enable it if so */
++ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
++ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
++ locked = true;
++ ready = e1000_is_mphy_ready(hw);
++ if (!ready)
++ return -E1000_ERR_PHY;
++ mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
++ }
++
++ /* Set the address that we want to read */
++ ready = e1000_is_mphy_ready(hw);
++ if (!ready)
++ return -E1000_ERR_PHY;
++
++ /* We mask address, because we want to use only current lane */
++ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK &
++ ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) |
++ (address & E1000_MPHY_ADDRESS_MASK);
++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
++
++ /* Read data from the address */
++ ready = e1000_is_mphy_ready(hw);
++ if (!ready)
++ return -E1000_ERR_PHY;
++ *data = E1000_READ_REG(hw, E1000_MPHY_DATA);
++
++ /* Disable access to mPHY if it was originally disabled */
++ if (locked)
++ ready = e1000_is_mphy_ready(hw);
++ if (!ready)
++ return -E1000_ERR_PHY;
++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
++ E1000_MPHY_DIS_ACCESS);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_write_phy_reg_mphy - Write mPHY control register
++ * @hw: pointer to the HW structure
++ * @address: address to write to
++ * @data: data to write to register at offset
++ * @line_override: used when we want to use different line than default one
++ *
++ * Writes data to mPHY control register.
++ **/
++s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
++ bool line_override)
++{
++ u32 mphy_ctrl = 0;
++ bool locked = false;
++ bool ready;
++
++ DEBUGFUNC("e1000_write_phy_reg_mphy");
++
++ /* Check if mPHY is ready to read/write operations */
++ ready = e1000_is_mphy_ready(hw);
++ if (!ready)
++ return -E1000_ERR_PHY;
++
++ /* Check if mPHY access is disabled and enable it if so */
++ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
++ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
++ locked = true;
++ ready = e1000_is_mphy_ready(hw);
++ if (!ready)
++ return -E1000_ERR_PHY;
++ mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
++ }
++
++ /* Set the address that we want to read */
++ ready = e1000_is_mphy_ready(hw);
++ if (!ready)
++ return -E1000_ERR_PHY;
++
++ /* We mask address, because we want to use only current lane */
++ if (line_override)
++ mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE;
++ else
++ mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE;
++ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) |
++ (address & E1000_MPHY_ADDRESS_MASK);
++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
++
++ /* Read data from the address */
++ ready = e1000_is_mphy_ready(hw);
++ if (!ready)
++ return -E1000_ERR_PHY;
++ E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);
++
++ /* Disable access to mPHY if it was originally disabled */
++ if (locked)
++ ready = e1000_is_mphy_ready(hw);
++ if (!ready)
++ return -E1000_ERR_PHY;
++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
++ E1000_MPHY_DIS_ACCESS);
++
++ return E1000_SUCCESS;
++}
++
++/**
++ * e1000_is_mphy_ready - Check if mPHY control register is not busy
++ * @hw: pointer to the HW structure
++ *
++ * Returns mPHY control register status.
++ **/
++bool e1000_is_mphy_ready(struct e1000_hw *hw)
++{
++ u16 retry_count = 0;
++ u32 mphy_ctrl = 0;
++ bool ready = false;
++
++ while (retry_count < 2) {
++ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
++ if (mphy_ctrl & E1000_MPHY_BUSY) {
++ usec_delay(20);
++ retry_count++;
++ continue;
++ }
++ ready = true;
+ break;
+ }
+
+- return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data);
++ if (!ready)
++ DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n");
++
++ return ready;
+ }
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
+--- a/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_phy.h 2016-11-14 14:32:08.579567168 +0000
+@@ -1,146 +1,115 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
+
+-#ifndef _E1000_PHY_H_
+-#define _E1000_PHY_H_
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
+
+-enum e1000_ms_type {
+- e1000_ms_hw_default = 0,
+- e1000_ms_force_master,
+- e1000_ms_force_slave,
+- e1000_ms_auto
+-};
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
+
+-enum e1000_smart_speed {
+- e1000_smart_speed_default = 0,
+- e1000_smart_speed_on,
+- e1000_smart_speed_off
+-};
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
+
+-s32 igb_check_downshift(struct e1000_hw *hw);
+-s32 igb_check_reset_block(struct e1000_hw *hw);
+-s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
+-s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
+-s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw);
+-s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+-s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+-s32 igb_get_cable_length_m88(struct e1000_hw *hw);
+-s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw);
+-s32 igb_get_cable_length_igp_2(struct e1000_hw *hw);
+-s32 igb_get_phy_id(struct e1000_hw *hw);
+-s32 igb_get_phy_info_igp(struct e1000_hw *hw);
+-s32 igb_get_phy_info_m88(struct e1000_hw *hw);
+-s32 igb_phy_sw_reset(struct e1000_hw *hw);
+-s32 igb_phy_hw_reset(struct e1000_hw *hw);
+-s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+-s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+-s32 igb_setup_copper_link(struct e1000_hw *hw);
+-s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+-s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
+- u32 usec_interval, bool *success);
+-void igb_power_up_phy_copper(struct e1000_hw *hw);
+-void igb_power_down_phy_copper(struct e1000_hw *hw);
+-s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
+-s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+-s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+-s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
+-s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
+-s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
+-s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
+-s32 igb_get_phy_info_82580(struct e1000_hw *hw);
+-s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
+-s32 igb_get_cable_length_82580(struct e1000_hw *hw);
+-s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
+-s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
+-s32 igb_check_polarity_m88(struct e1000_hw *hw);
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
+
+-/* IGP01E1000 Specific Registers */
+-#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
+-#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
+-#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
+-#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
+-#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
+-#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
+-#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+-#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+-#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+-#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
+-#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+-
+-#define I82580_ADDR_REG 16
+-#define I82580_CFG_REG 22
+-#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15)
+-#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
+-#define I82580_CTRL_REG 23
+-#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10)
+-
+-/* 82580 specific PHY registers */
+-#define I82580_PHY_CTRL_2 18
+-#define I82580_PHY_LBK_CTRL 19
+-#define I82580_PHY_STATUS_2 26
+-#define I82580_PHY_DIAG_STATUS 31
+-
+-/* I82580 PHY Status 2 */
+-#define I82580_PHY_STATUS2_REV_POLARITY 0x0400
+-#define I82580_PHY_STATUS2_MDIX 0x0800
+-#define I82580_PHY_STATUS2_SPEED_MASK 0x0300
+-#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200
+-#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100
+-
+-/* I82580 PHY Control 2 */
+-#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200
+-#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+-#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600
+-
+-/* I82580 PHY Diagnostics Status */
+-#define I82580_DSTATUS_CABLE_LENGTH 0x03FC
+-#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+-/* 82580 PHY Power Management */
+-#define E1000_82580_PHY_POWER_MGMT 0xE14
+-#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
+-#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
+-#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
+-#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
++*******************************************************************************/
++
++#ifndef _E1000_PHY_H_
++#define _E1000_PHY_H_
++
++void e1000_init_phy_ops_generic(struct e1000_hw *hw);
++s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data);
++void e1000_null_phy_generic(struct e1000_hw *hw);
++s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active);
++s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_null_set_page(struct e1000_hw *hw, u16 data);
++s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
++ u8 dev_addr, u8 *data);
++s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
++ u8 dev_addr, u8 data);
++s32 e1000_check_downshift_generic(struct e1000_hw *hw);
++s32 igb_e1000_check_polarity_m88(struct e1000_hw *hw);
++s32 igb_e1000_check_polarity_igp(struct e1000_hw *hw);
++s32 igb_e1000_check_polarity_ife(struct e1000_hw *hw);
++s32 e1000_check_reset_block_generic(struct e1000_hw *hw);
++s32 e1000_copper_link_setup_igp(struct e1000_hw *hw);
++s32 e1000_copper_link_setup_m88(struct e1000_hw *hw);
++s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw);
++s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
++s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
++s32 igb_e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
++s32 e1000_get_cable_length_m88(struct e1000_hw *hw);
++s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw);
++s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw);
++s32 e1000_get_cfg_done_generic(struct e1000_hw *hw);
++s32 e1000_get_phy_id(struct e1000_hw *hw);
++s32 e1000_get_phy_info_igp(struct e1000_hw *hw);
++s32 e1000_get_phy_info_m88(struct e1000_hw *hw);
++s32 igb_e1000_get_phy_info_ife(struct e1000_hw *hw);
++s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw);
++void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
++s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw);
++s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
++s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 igb_e1000_set_page_igp(struct e1000_hw *hw, u16 page);
++s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
++s32 e1000_setup_copper_link_generic(struct e1000_hw *hw);
++s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
++ u32 usec_interval, bool *success);
++s32 e1000_phy_init_script_igp3(struct e1000_hw *hw);
++enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
++s32 e1000_determine_phy_address(struct e1000_hw *hw);
++s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
++s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
++void igb_e1000_power_up_phy_copper(struct e1000_hw *hw);
++void igb_e1000_power_down_phy_copper(struct e1000_hw *hw);
++s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
++s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
++s32 igb_e1000_copper_link_setup_82577(struct e1000_hw *hw);
++s32 igb_e1000_check_polarity_82577(struct e1000_hw *hw);
++s32 igb_e1000_get_phy_info_82577(struct e1000_hw *hw);
++s32 igb_e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
++s32 igb_e1000_get_cable_length_82577(struct e1000_hw *hw);
++s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
++s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
++s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data);
++s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
++ bool line_override);
++bool e1000_is_mphy_ready(struct e1000_hw *hw);
+
+-/* Enable flexible speed on link-up */
+-#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
+-#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
+-#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+-#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+-#define IGP01E1000_PSSR_MDIX 0x0800
+-#define IGP01E1000_PSSR_SPEED_MASK 0xC000
+-#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+-#define IGP02E1000_PHY_CHANNEL_NUM 4
+-#define IGP02E1000_PHY_AGC_A 0x11B1
+-#define IGP02E1000_PHY_AGC_B 0x12B1
+-#define IGP02E1000_PHY_AGC_C 0x14B1
+-#define IGP02E1000_PHY_AGC_D 0x18B1
+-#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */
+-#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+-#define IGP02E1000_AGC_RANGE 15
++#define E1000_MAX_PHY_ADDR 8
+
+-#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
++/* IGP01E1000 Specific Registers */
++#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
++#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
++#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
++#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
++#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
++#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
++#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
++#define IGP_PAGE_SHIFT 5
++#define PHY_REG_MASK 0x1F
+
+ /* GS40G - I210 PHY defines */
+ #define GS40G_PAGE_SELECT 0x16
+@@ -151,7 +120,110 @@
+ #define GS40G_MAC_LB 0x4140
+ #define GS40G_MAC_SPEED_1G 0X0006
+ #define GS40G_COPPER_SPEC 0x0010
+-#define GS40G_LINE_LB 0x4000
++
++#define HV_INTC_FC_PAGE_START 768
++#define I82578_ADDR_REG 29
++#define I82577_ADDR_REG 16
++#define I82577_CFG_REG 22
++#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
++#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
++#define I82577_CTRL_REG 23
++
++/* 82577 specific PHY registers */
++#define I82577_PHY_CTRL_2 18
++#define I82577_PHY_LBK_CTRL 19
++#define I82577_PHY_STATUS_2 26
++#define I82577_PHY_DIAG_STATUS 31
++
++/* I82577 PHY Status 2 */
++#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
++#define I82577_PHY_STATUS2_MDIX 0x0800
++#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
++#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
++
++/* I82577 PHY Control 2 */
++#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
++#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
++#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
++
++/* I82577 PHY Diagnostics Status */
++#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
++#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
++
++/* 82580 PHY Power Management */
++#define E1000_82580_PHY_POWER_MGMT 0xE14
++#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
++#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
++#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
++#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
++
++#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */
++#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */
++#define E1000_MPHY_BUSY 0x00010000 /* busy bit */
++#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */
++#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */
++
++#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
++#define IGP01E1000_PHY_POLARITY_MASK 0x0078
++
++#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
++#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
++
++#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
++
++#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
++#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
++#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
++
++#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
++
++#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
++#define IGP01E1000_PSSR_MDIX 0x0800
++#define IGP01E1000_PSSR_SPEED_MASK 0xC000
++#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
++
++#define IGP02E1000_PHY_CHANNEL_NUM 4
++#define IGP02E1000_PHY_AGC_A 0x11B1
++#define IGP02E1000_PHY_AGC_B 0x12B1
++#define IGP02E1000_PHY_AGC_C 0x14B1
++#define IGP02E1000_PHY_AGC_D 0x18B1
++
++#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */
++#define IGP02E1000_AGC_LENGTH_MASK 0x7F
++#define IGP02E1000_AGC_RANGE 15
++
++#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
++
++#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
++#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
++#define E1000_KMRNCTRLSTA_REN 0x00200000
++#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
++#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
++#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
++#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
++#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
++
++#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
++#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
++#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
++#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
++
++/* IFE PHY Extended Status Control */
++#define IFE_PESC_POLARITY_REVERSED 0x0100
++
++/* IFE PHY Special Control */
++#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
++#define IFE_PSC_FORCE_POLARITY 0x0020
++
++/* IFE PHY Special Control and LED Control */
++#define IFE_PSCL_PROBE_MODE 0x0020
++#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
++#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
++
++/* IFE PHY MDIX Control */
++#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
++#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
++#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */
+
+ /* SFP modules ID memory locations */
+ #define E1000_SFF_IDENTIFIER_OFFSET 0x00
+@@ -160,7 +232,7 @@
+
+ #define E1000_SFF_ETH_FLAGS_OFFSET 0x06
+ /* Flags for SFP modules compatible with ETH up to 1Gb */
+-struct e1000_sfp_flags {
++struct sfp_e1000_flags {
+ u8 e1000_base_sx:1;
+ u8 e1000_base_lx:1;
+ u8 e1000_base_cx:1;
+@@ -171,4 +243,10 @@
+ u8 e10_base_px:1;
+ };
+
++/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
++#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600
++#define E1000_SFF_VENDOR_OUI_FTL 0x00906500
++#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00
++#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100
++
+ #endif
+diff -Nu a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
+--- a/drivers/net/ethernet/intel/igb/e1000_regs.h 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/e1000_regs.h 2016-11-14 14:32:08.579567168 +0000
+@@ -1,154 +1,196 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
+
+ #ifndef _E1000_REGS_H_
+ #define _E1000_REGS_H_
+
+-#define E1000_CTRL 0x00000 /* Device Control - RW */
+-#define E1000_STATUS 0x00008 /* Device Status - RO */
+-#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+-#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+-#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+-#define E1000_MDIC 0x00020 /* MDI Control - RW */
+-#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */
+-#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+-#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+-#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+-#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+-#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+-#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+-#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */
+-#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+-#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+-#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+-#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+-#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+-#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+-#define E1000_RCTL 0x00100 /* RX Control - RW */
+-#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+-#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
+-#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
+-#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+-#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
+-#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+-#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
+-#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
+-#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
+-#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
+-#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
+-#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+-#define E1000_TCTL 0x00400 /* TX Control - RW */
+-#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
+-#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
+-#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+-#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+-#define E1000_LEDMUX 0x08130 /* LED MUX Control */
+-#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+-#define E1000_PBS 0x01008 /* Packet Buffer Size */
+-#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+-#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
+-#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+-#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
+-#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
+-#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
+-#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+-#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+-#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
+-#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
+-#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */
+-#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */
+-#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */
+-#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */
+-#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */
+-#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */
+-#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */
++#define E1000_CTRL 0x00000 /* Device Control - RW */
++#define E1000_STATUS 0x00008 /* Device Status - RO */
++#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
++#define E1000_EERD 0x00014 /* EEPROM Read - RW */
++#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
++#define E1000_FLA 0x0001C /* Flash Access - RW */
++#define E1000_MDIC 0x00020 /* MDI Control - RW */
++#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */
++#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */
++#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */
++#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */
++#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */
++#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */
++#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */
+ #define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */
+ #define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */
+ #define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */
++#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */
++#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */
++#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/
++#define E1000_SCTL 0x00024 /* SerDes Control - RW */
++#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
++#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
++#define E1000_FCT 0x00030 /* Flow Control Type - RW */
++#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
++#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
++#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
++#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
++#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
++#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
++#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
++#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
++#define E1000_RCTL 0x00100 /* Rx Control - RW */
++#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
++#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
++#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
++#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
++#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
++#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
++#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
++#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
++#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
++#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
++#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
++#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
++#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
++#define E1000_TCTL 0x00400 /* Tx Control - RW */
++#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
++#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */
++#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
++#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
++#define E1000_LEDMUX 0x08130 /* LED MUX Control */
++#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
++#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
++#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
++#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
++#define E1000_PBS 0x01008 /* Packet Buffer Size */
++#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
++#define E1000_EEMNGCTL_I210 0x01010 /* i210 MNG EEprom Mode Control */
++#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
++#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
++#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
++#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
++#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
++#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
++#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */
++#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */
++#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */
++#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */
++#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */
++#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */
++#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */
++#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */
++#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */
++#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */
++#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
++#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
++#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */
++#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */
++#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */
++#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */
++#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */
++#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */
++#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
++#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
++#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
++#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
++#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
++#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
++#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
++#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
++#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
++#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */
++#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
++/* Split and Replication Rx Control - RW */
++#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */
++#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */
++#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */
++#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */
++#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */
++#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */
++#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
++#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */
++#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */
++#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
++#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
++#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */
++#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */
++#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */
++#define E1000_I210_FLMNGCTL 0x12038
++#define E1000_I210_FLMNGDATA 0x1203C
++#define E1000_I210_FLMNGCNT 0x12040
+
+-/* IEEE 1588 TIMESYNCH */
+-#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+-#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+-#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+-#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+-#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+-#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
+-#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
+-#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+-#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+-#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
+-#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
+-#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+-#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+-#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */
+-#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */
+-#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */
+-#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */
+-#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */
+-#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
+-#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */
+-#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */
+-#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
+-#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
+-#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
++#define E1000_I210_FLSWCTL 0x12048
++#define E1000_I210_FLSWDATA 0x1204C
++#define E1000_I210_FLSWCNT 0x12050
+
+-/* Filtering Registers */
+-#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
+-#define E1000_DAQF(_n) (0x59A0 + 4 * (_n))
+-#define E1000_SPQF(_n) (0x59C0 + 4 * (_n))
+-#define E1000_FTQF(_n) (0x59E0 + 4 * (_n))
+-#define E1000_SAQF0 E1000_SAQF(0)
+-#define E1000_DAQF0 E1000_DAQF(0)
+-#define E1000_SPQF0 E1000_SPQF(0)
+-#define E1000_FTQF0 E1000_FTQF(0)
+-#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
+-#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
++#define E1000_I210_FLA 0x1201C
+
+-#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
++#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
++#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
+
+-/* DMA Coalescing registers */
+-#define E1000_DMACR 0x02508 /* Control Register */
+-#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
+-#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
+-#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
+-#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
+-#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
+-#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
++/* QAV Tx mode control register */
++#define E1000_I210_TQAVCTRL 0x3570
+
+-/* TX Rate Limit Registers */
+-#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
+-#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */
+-#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
++/* QAV Tx mode control register bitfields masks */
++/* QAV enable */
++#define E1000_TQAVCTRL_MODE (1 << 0)
++/* Fetching arbitration type */
++#define E1000_TQAVCTRL_FETCH_ARB (1 << 4)
++/* Fetching timer enable */
++#define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5)
++/* Launch arbitration type */
++#define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8)
++/* Launch timer enable */
++#define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9)
++/* SP waits for SR enable */
++#define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10)
++/* Fetching timer correction */
++#define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16
++#define E1000_TQAVCTRL_FETCH_TIMER_DELTA \
++ (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET)
++
++/* High credit registers where _n can be 0 or 1. */
++#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n))
++
++/* Queues fetch arbitration priority control register */
++#define E1000_I210_TQAVARBCTRL 0x3574
++/* Queues priority masks where _n and _p can be 0-3. */
++#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n)))
++/* QAV Tx mode control registers where _n can be 0 or 1. */
++#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n))
++
++/* QAV Tx mode control register bitfields masks */
++#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */
++#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */
++#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. SR Tx mode */
+
+-/* Split and Replication RX Control - RW */
+-#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
++/* Good transmitted packets counter registers */
++#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
+
+-/* Thermal sensor configuration and status registers */
+-#define E1000_THMJT 0x08100 /* Junction Temperature */
+-#define E1000_THLOWTC 0x08104 /* Low Threshold Control */
+-#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */
+-#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
+-#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
++/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */
++#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n)))
++
++#define E1000_MMDAC 13 /* MMD Access Control */
++#define E1000_MMDAAD 14 /* MMD Access Address/Data */
+
+ /* Convenience macros
+ *
+@@ -157,269 +199,442 @@
+ * Example usage:
+ * E1000_RDBAL_REG(current_rx_queue)
+ */
+-#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \
+- : (0x0C000 + ((_n) * 0x40)))
+-#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \
+- : (0x0C004 + ((_n) * 0x40)))
+-#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \
+- : (0x0C008 + ((_n) * 0x40)))
+-#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \
+- : (0x0C00C + ((_n) * 0x40)))
+-#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \
+- : (0x0C010 + ((_n) * 0x40)))
+-#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \
+- : (0x0C018 + ((_n) * 0x40)))
+-#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \
+- : (0x0C028 + ((_n) * 0x40)))
+-#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \
+- : (0x0E000 + ((_n) * 0x40)))
+-#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \
+- : (0x0E004 + ((_n) * 0x40)))
+-#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \
+- : (0x0E008 + ((_n) * 0x40)))
+-#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \
+- : (0x0E010 + ((_n) * 0x40)))
+-#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \
+- : (0x0E018 + ((_n) * 0x40)))
+-#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
+- : (0x0E028 + ((_n) * 0x40)))
+-#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+- (0x0C014 + ((_n) * 0x40)))
++#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
++ (0x0C000 + ((_n) * 0x40)))
++#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
++ (0x0C004 + ((_n) * 0x40)))
++#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
++ (0x0C008 + ((_n) * 0x40)))
++#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
++ (0x0C00C + ((_n) * 0x40)))
++#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
++ (0x0C010 + ((_n) * 0x40)))
++#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
++ (0x0C014 + ((_n) * 0x40)))
+ #define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
+-#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+- (0x0E014 + ((_n) * 0x40)))
++#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
++ (0x0C018 + ((_n) * 0x40)))
++#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
++ (0x0C028 + ((_n) * 0x40)))
++#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \
++ (0x0C030 + ((_n) * 0x40)))
++#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
++ (0x0E000 + ((_n) * 0x40)))
++#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
++ (0x0E004 + ((_n) * 0x40)))
++#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
++ (0x0E008 + ((_n) * 0x40)))
++#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
++ (0x0E010 + ((_n) * 0x40)))
++#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
++ (0x0E014 + ((_n) * 0x40)))
+ #define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
+-#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
+- : (0x0E038 + ((_n) * 0x40)))
+-#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
+- : (0x0E03C + ((_n) * 0x40)))
+-
+-#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+-#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
++#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
++ (0x0E018 + ((_n) * 0x40)))
++#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
++ (0x0E028 + ((_n) * 0x40)))
++#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \
++ (0x0E038 + ((_n) * 0x40)))
++#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \
++ (0x0E03C + ((_n) * 0x40)))
++#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
++#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */
++#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
++#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
++#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
++#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
++ (0x054E0 + ((_i - 16) * 8)))
++#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
++ (0x054E4 + ((_i - 16) * 8)))
++#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
++#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
++#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
++#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
++#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
++#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
++#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
++#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
++#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */
++#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */
++#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
++/* Same as TXPBS, renamed for newer Si - RW */
++#define E1000_ITPBS 0x03404
++#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
++#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
++#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
++#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
++#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
++#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */
++#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */
++#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */
++#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */
++#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */
++#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */
++#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
++#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
++/* DMA Tx Max Total Allow Size Reqs - RW */
++#define E1000_DTXMXSZRQ 0x03540
++#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
++#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
++#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
++#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
++#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
++#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
++#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
++#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
++#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
++#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
++#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
++#define E1000_COLC 0x04028 /* Collision Count - R/clr */
++#define E1000_DC 0x04030 /* Defer Count - R/clr */
++#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
++#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
++#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
++#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
++#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
++#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
++#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
++#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
++#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
++#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
++#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
++#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
++#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */
++#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
++#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
++#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
++#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
++#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
++#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
++#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
++#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
++#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
++#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
++#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
++#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */
++#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */
++#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */
++#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */
++#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
++#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
++#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
++#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
++#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */
++#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
++#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */
++#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */
++#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */
++#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
++#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
++#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
++#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
++#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
++#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
++#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
++#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
++#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
++#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
++#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
++#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
++#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
++#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
++#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
++#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
++#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
++#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
++#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
++
++/* Virtualization statistical counters */
++#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n)))
++#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n)))
++#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n)))
++#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n)))
++#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n)))
++#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n)))
++#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n)))
++#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n)))
++#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n)))
++
++/* LinkSec */
++#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */
++#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */
++#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */
++#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */
++#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */
++#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */
++#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */
++#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */
++#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */
++#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */
++#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */
++#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */
++#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */
++#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */
++#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */
++#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */
++#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */
++#define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */
++#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */
++#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */
++#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */
++#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */
++#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */
++#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */
++#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */
++#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */
++#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */
++#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */
++#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */
++#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */
++/* LinkSec Tx 128-bit Key 0 - WO */
++#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n)))
++/* LinkSec Tx 128-bit Key 1 - WO */
++#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n)))
++#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */
++#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */
++/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit
++ * key - RW.
++ */
++#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m)))
+
+-#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
+-#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
+-#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
+-#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
+-#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */
+-#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+-#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+-#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+-#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+-#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+-#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+-#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+-#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+-#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+-#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+-#define E1000_DC 0x04030 /* Defer Count - R/clr */
+-#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
+-#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+-#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+-#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+-#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
+-#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
+-#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
+-#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
+-#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */
+-#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
+-#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
+-#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
+-#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */
+-#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
+-#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
+-#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
+-#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
+-#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
+-#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
+-#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
+-#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
+-#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
+-#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
+-#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
+-#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
+-#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
+-#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
+-#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
+-#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
+-#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+-#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
+-#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
+-#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
+-#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
+-#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
+-#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
+-#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
+-#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
+-#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
+-#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
+-#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
+-#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
+-#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
+-#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
+-#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
+-#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
+-#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
+-#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+-/* Interrupt Cause Rx Packet Timer Expire Count */
+-#define E1000_ICRXPTC 0x04104
+-/* Interrupt Cause Rx Absolute Timer Expire Count */
+-#define E1000_ICRXATC 0x04108
+-/* Interrupt Cause Tx Packet Timer Expire Count */
+-#define E1000_ICTXPTC 0x0410C
+-/* Interrupt Cause Tx Absolute Timer Expire Count */
+-#define E1000_ICTXATC 0x04110
+-/* Interrupt Cause Tx Queue Empty Count */
+-#define E1000_ICTXQEC 0x04118
+-/* Interrupt Cause Tx Queue Minimum Threshold Count */
+-#define E1000_ICTXQMTC 0x0411C
+-/* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+-#define E1000_ICRXDMTC 0x04120
+-#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+-#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */
+-#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
+-#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
+-#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */
+-#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
+-#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */
+-#define E1000_RPTHC 0x04104 /* Rx Packets To Host */
+-#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */
+-#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */
+-#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */
+-#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */
+-#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
+-#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
+-#define E1000_LENERRS 0x04138 /* Length Errors Count */
+-#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
+-#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
+-#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
+-#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
+-#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */
+-#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
+-#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */
+-#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
+-#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+-#define E1000_RA 0x05400 /* Receive Address - RW Array */
+-#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
+-#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
+-#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+- (0x054E0 + ((_i - 16) * 8)))
+-#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+- (0x054E4 + ((_i - 16) * 8)))
+-#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
+-#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
+-#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
+-#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
+-#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
+-#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
+-#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+-#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
+-#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+-#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+-#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+-#define E1000_MANC 0x05820 /* Management Control - RW */
+-#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
+-#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
+-
+-#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
+-#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
+-#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
+-#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
+-#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+-#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+-#define E1000_SWSM 0x05B50 /* SW Semaphore */
+-#define E1000_FWSM 0x05B54 /* FW Semaphore */
+-#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
++#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */
++#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */
++#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */
++#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */
++/* IPSec Rx IPv4/v6 Address - RW */
++#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n)))
++/* IPSec Rx 128-bit Key - RW */
++#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n)))
++#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */
++#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */
++/* IPSec Tx 128-bit Key - RW */
++#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n)))
++#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */
++#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */
++#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */
++#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
++#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
++#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */
++#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
++#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */
++#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */
++#define E1000_RPTHC 0x04104 /* Rx Packets To Host */
++#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */
++#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */
++#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */
++#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */
++#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
++#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
++#define E1000_LENERRS 0x04138 /* Length Errors Count */
++#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
++#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
++#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
++#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
++#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
++#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */
++#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
++#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */
++#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
++#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
++#define E1000_RA 0x05400 /* Receive Address - RW Array */
++#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
++#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
++#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
++#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */
++#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */
++#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */
++#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */
++#define E1000_WUC 0x05800 /* Wakeup Control - RW */
++#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
++#define E1000_WUS 0x05810 /* Wakeup Status - RO */
++#define E1000_MANC 0x05820 /* Management Control - RW */
++#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
++#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
++#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
++#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
++#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
++#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */
++#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
++#define E1000_HOST_IF 0x08800 /* Host Interface */
++#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */
++/* Flexible Host Filter Table */
++#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100))
++/* Ext Flexible Host Filter Table */
++#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100))
++
++#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
++#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
++/* Management Decision Filters */
++#define E1000_MDEF(_n) (0x05890 + (4 * (_n)))
++#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
++#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
++#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
++#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
++#define E1000_GCR 0x05B00 /* PCI-Ex Control */
++#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
++#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
++#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
++#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
++#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
++#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
++#define E1000_SWSM 0x05B50 /* SW Semaphore */
++#define E1000_FWSM 0x05B54 /* FW Semaphore */
++/* Driver-only SW semaphore (not used by BOOT agents) */
++#define E1000_SWSM2 0x05B58
++#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
++#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
++#define E1000_UFUSE 0x05B78 /* UFUSE - RO */
++#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
++#define E1000_HICR 0x08F00 /* Host Interface Control */
++#define E1000_FWSTS 0x08F0C /* FW Status */
+
+ /* RSS registers */
+-#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+-#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
+-#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/
+-#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */
+-/* MSI-X Allocation Register (_i) - RW */
+-#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4))
+-/* Redirection Table - RW Array */
+-#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
+-#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
+-
++#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
++#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
++#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
++#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/
++#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */
++#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */
++#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
++#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
++#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
++#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
+ /* VT Registers */
+-#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
+-#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
+-#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
+-#define E1000_VFRE 0x00C8C /* VF Receive Enables */
+-#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
+-#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
+-#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
+-#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
+-#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
+-#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
+-#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
+-#define E1000_TXSWC 0x05ACC /* Tx Switch Control */
++#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */
++#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
++#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
++#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
++#define E1000_VFRE 0x00C8C /* VF Receive Enables */
++#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
++#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
++#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
++#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
++#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
++#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
++#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
++#define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */
++#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */
++#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */
++#define E1000_MDFB 0x03558 /* Malicious Driver free block */
++#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */
++#define E1000_TXSWC 0x05ACC /* Tx Switch Control */
++#define E1000_SCCRL 0x05DB0 /* Storm Control Control */
++#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */
++#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */
+ /* These act per VF so an array friendly macro is used */
+-#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
+-#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
+-#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
+-#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n)))
+-#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
+-#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
+-
+-struct e1000_hw;
+-
+-u32 igb_rd32(struct e1000_hw *hw, u32 reg);
+-
+-/* write operations, indexed using DWORDS */
+-#define wr32(reg, val) \
+-do { \
+- u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+- if (!E1000_REMOVED(hw_addr)) \
+- writel((val), &hw_addr[(reg)]); \
+-} while (0)
+-
+-#define rd32(reg) (igb_rd32(hw, reg))
++#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
++#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
++#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
++#define E1000_VFVMBMEM(_n) (0x00800 + (_n))
++#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
++/* VLAN Virtual Machine Filter - RW */
++#define E1000_VLVF(_n) (0x05D00 + (4 * (_n)))
++#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
++#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */
++#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */
++#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
++#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
++#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
++#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
++#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
++#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
++#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
++#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
++#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
++#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
++#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
++#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
++#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */
++#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */
++#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
++#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
++#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
++#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
+
+-#define wrfl() ((void)rd32(E1000_STATUS))
+-
+-#define array_wr32(reg, offset, value) \
+- wr32((reg) + ((offset) << 2), (value))
+-
+-#define array_rd32(reg, offset) \
+- (readl(hw->hw_addr + reg + ((offset) << 2)))
++/* Filtering Registers */
++#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
++#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
++#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */
++#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
++#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */
++#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
++#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
++
++#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */
++#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */
++#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */
++#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */
++#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */
++/* Tx Desc plane TC Rate-scheduler config */
++#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4))
++/* Tx Packet plane TC Rate-Scheduler Config */
++#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4))
++/* Rx Packet plane TC Rate-Scheduler Config */
++#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4))
++/* Tx Desc Plane TC Rate-Scheduler Status */
++#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4))
++/* Tx Desc Plane TC Rate-Scheduler MMW */
++#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4))
++/* Tx Packet plane TC Rate-Scheduler Status */
++#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4))
++/* Tx Packet plane TC Rate-scheduler MMW */
++#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4))
++/* Rx Packet plane TC Rate-Scheduler Status */
++#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4))
++/* Rx Packet plane TC Rate-Scheduler MMW */
++#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4))
++/* Tx Desc plane VM Rate-Scheduler MMW*/
++#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4))
++/* Tx BCN Rate-Scheduler MMW */
++#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4))
++#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */
++#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */
++#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */
++#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */
++#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */
++#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */
++#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */
++#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */
++#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */
++#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */
++#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */
++#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */
++#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */
++#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */
+
+ /* DMA Coalescing registers */
++#define E1000_DMACR 0x02508 /* Control Register */
++#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
++#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
++#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
++#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
++#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
+ #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
+
+-/* Energy Efficient Ethernet "EEE" register */
+-#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
+-#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */
+-#define E1000_EEE_SU 0X0E34 /* EEE Setup */
+-#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */
+-#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */
+-#define E1000_MMDAC 13 /* MMD Access Control */
+-#define E1000_MMDAAD 14 /* MMD Access Address/Data */
++/* PCIe Parity Status Register */
++#define E1000_PCIEERRSTS 0x05BA8
+
+-/* Thermal Sensor Register */
++#define E1000_PROXYS 0x5F64 /* Proxying Status */
++#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */
++/* Thermal sensor configuration and status registers */
++#define E1000_THMJT 0x08100 /* Junction Temperature */
++#define E1000_THLOWTC 0x08104 /* Low Threshold Control */
++#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */
++#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
+ #define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
+
++/* Energy Efficient Ethernet "EEE" registers */
++#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
++#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
++#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/
++#define E1000_EEE_SU 0x0E34 /* EEE Setup */
++#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */
++#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */
++
+ /* OS2BMC Registers */
+ #define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
+ #define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
+ #define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
+ #define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
+
+-#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */
+-#define E1000_I210_FLMNGCTL 0x12038
+-#define E1000_I210_FLMNGDATA 0x1203C
+-#define E1000_I210_FLMNGCNT 0x12040
+-
+-#define E1000_I210_FLSWCTL 0x12048
+-#define E1000_I210_FLSWDATA 0x1204C
+-#define E1000_I210_FLSWCNT 0x12050
+-
+-#define E1000_I210_FLA 0x1201C
+-
+-#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
+-#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
+-
+-#define E1000_REMOVED(h) unlikely(!(h))
+-
+ #endif
+diff -Nu a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
+--- a/drivers/net/ethernet/intel/igb/igb.h 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/igb.h 2016-11-14 14:32:08.579567168 +0000
+@@ -1,107 +1,149 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
+
+ /* Linux PRO/1000 Ethernet Driver main header file */
+
+ #ifndef _IGB_H_
+ #define _IGB_H_
+
+-#include "e1000_mac.h"
++#include
++
++#ifndef IGB_NO_LRO
++#include
++#endif
++
++#include
++#include
++#include
++
++#ifdef SIOCETHTOOL
++#include
++#endif
++
++struct igb_adapter;
++
++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
++#define IGB_DCA
++#endif
++#ifdef IGB_DCA
++#include
++#endif
++
++#include "kcompat.h"
++
++#ifdef HAVE_SCTP
++#include
++#endif
++
++#include "e1000_api.h"
+ #include "e1000_82575.h"
++#include "e1000_manage.h"
++#include "e1000_mbx.h"
++
++#define IGB_ERR(args...) pr_err(KERN_ERR "igb: " args)
+
++#define PFX "igb: "
++#define DPRINTK(nlevel, klevel, fmt, args...) \
++ (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
++ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
++ __func__ , ## args))
++
++#ifdef HAVE_PTP_1588_CLOCK
++#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H
++#include
++#else
+ #include
++#endif /* HAVE_INCLUDE_TIMECOUNTER_H */
+ #include
+ #include
+-#include
+-#include
++#endif /* HAVE_PTP_1588_CLOCK */
++
++#ifdef HAVE_I2C_SUPPORT
+ #include
+ #include
+-#include
+-#include
+-
+-struct igb_adapter;
+-
+-#define E1000_PCS_CFG_IGN_SD 1
++#endif /* HAVE_I2C_SUPPORT */
+
+ /* Interrupt defines */
+-#define IGB_START_ITR 648 /* ~6000 ints/sec */
+-#define IGB_4K_ITR 980
+-#define IGB_20K_ITR 196
+-#define IGB_70K_ITR 56
++#define IGB_START_ITR 648 /* ~6000 ints/sec */
++#define IGB_4K_ITR 980
++#define IGB_20K_ITR 196
++#define IGB_70K_ITR 56
++
++/* Interrupt modes, as used by the IntMode paramter */
++#define IGB_INT_MODE_LEGACY 0
++#define IGB_INT_MODE_MSI 1
++#define IGB_INT_MODE_MSIX 2
+
+ /* TX/RX descriptor defines */
+-#define IGB_DEFAULT_TXD 256
+-#define IGB_DEFAULT_TX_WORK 128
+-#define IGB_MIN_TXD 80
+-#define IGB_MAX_TXD 4096
+-
+-#define IGB_DEFAULT_RXD 256
+-#define IGB_MIN_RXD 80
+-#define IGB_MAX_RXD 4096
+-
+-#define IGB_DEFAULT_ITR 3 /* dynamic */
+-#define IGB_MAX_ITR_USECS 10000
+-#define IGB_MIN_ITR_USECS 10
+-#define NON_Q_VECTORS 1
+-#define MAX_Q_VECTORS 8
+-#define MAX_MSIX_ENTRIES 10
++#define IGB_DEFAULT_TXD 256
++#define IGB_DEFAULT_TX_WORK 128
++#define IGB_MIN_TXD 80
++#define IGB_MAX_TXD 4096
++
++#define IGB_DEFAULT_RXD 256
++#define IGB_MIN_RXD 80
++#define IGB_MAX_RXD 4096
++
++#define IGB_MIN_ITR_USECS 10 /* 100k irq/sec */
++#define IGB_MAX_ITR_USECS 8191 /* 120 irq/sec */
++
++#define NON_Q_VECTORS 1
++#define MAX_Q_VECTORS 10
+
+ /* Transmit and receive queues */
+-#define IGB_MAX_RX_QUEUES 8
+-#define IGB_MAX_RX_QUEUES_82575 4
+-#define IGB_MAX_RX_QUEUES_I211 2
+-#define IGB_MAX_TX_QUEUES 8
+-#define IGB_MAX_VF_MC_ENTRIES 30
+-#define IGB_MAX_VF_FUNCTIONS 8
+-#define IGB_MAX_VFTA_ENTRIES 128
+-#define IGB_82576_VF_DEV_ID 0x10CA
+-#define IGB_I350_VF_DEV_ID 0x1520
+-
+-/* NVM version defines */
+-#define IGB_MAJOR_MASK 0xF000
+-#define IGB_MINOR_MASK 0x0FF0
+-#define IGB_BUILD_MASK 0x000F
+-#define IGB_COMB_VER_MASK 0x00FF
+-#define IGB_MAJOR_SHIFT 12
+-#define IGB_MINOR_SHIFT 4
+-#define IGB_COMB_VER_SHFT 8
+-#define IGB_NVM_VER_INVALID 0xFFFF
+-#define IGB_ETRACK_SHIFT 16
+-#define NVM_ETRACK_WORD 0x0042
+-#define NVM_COMB_VER_OFF 0x0083
+-#define NVM_COMB_VER_PTR 0x003d
++#define IGB_MAX_RX_QUEUES 16
++#define IGB_MAX_RX_QUEUES_82575 4
++#define IGB_MAX_RX_QUEUES_I211 2
++#define IGB_MAX_TX_QUEUES 16
++
++#define IGB_MAX_VF_MC_ENTRIES 30
++#define IGB_MAX_VF_FUNCTIONS 8
++#define IGB_82576_VF_DEV_ID 0x10CA
++#define IGB_I350_VF_DEV_ID 0x1520
++#define IGB_MAX_UTA_ENTRIES 128
++#define MAX_EMULATION_MAC_ADDRS 16
++#define OUI_LEN 3
++#define IGB_MAX_VMDQ_QUEUES 8
+
+ struct vf_data_storage {
+ unsigned char vf_mac_addresses[ETH_ALEN];
+ u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
++ u16 default_vf_vlan_id;
+ u16 vlans_enabled;
++ unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN];
++ u32 uta_table_copy[IGB_MAX_UTA_ENTRIES];
+ u32 flags;
+ unsigned long last_nack;
++#ifdef IFLA_VF_MAX
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
+ u16 tx_rate;
++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ bool spoofchk_enabled;
++#endif
++#endif
+ };
+
+ #define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
+@@ -125,31 +167,97 @@
+ #define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
+ #define IGB_TX_HTHRESH 1
+ #define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
+- (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
+-#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
+- (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)
++ adapter->msix_entries) ? 1 : 4)
+
+ /* this is the size past which hardware will drop packets when setting LPE=0 */
+ #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
++/* NOTE: netdev_alloc_skb reserves 16 bytes, NET_IP_ALIGN means we
++ * reserve 2 more, and skb_shared_info adds an additional 384 more,
++ * this adds roughly 448 bytes of extra data meaning the smallest
++ * allocation we could have is 1K.
++ * i.e. RXBUFFER_512 --> size-1024 slab
++ */
+ /* Supported Rx Buffer Sizes */
+-#define IGB_RXBUFFER_256 256
+-#define IGB_RXBUFFER_2048 2048
+-#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
+-#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
++#define IGB_RXBUFFER_256 256
++#define IGB_RXBUFFER_2048 2048
++#define IGB_RXBUFFER_16384 16384
++#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
++#if MAX_SKB_FRAGS < 8
++#define IGB_RX_BUFSZ ALIGN(MAX_JUMBO_FRAME_SIZE / MAX_SKB_FRAGS, 1024)
++#else
++#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
++#endif
++
++
++/* Packet Buffer allocations */
++#define IGB_PBA_BYTES_SHIFT 0xA
++#define IGB_TX_HEAD_ADDR_SHIFT 7
++#define IGB_PBA_TX_MASK 0xFFFF0000
++
++#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+ /* How many Rx Buffers do we bundle into one write to the hardware ? */
+-#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
++#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+-#define AUTO_ALL_MODES 0
+-#define IGB_EEPROM_APME 0x0400
++#define IGB_EEPROM_APME 0x0400
++#define AUTO_ALL_MODES 0
+
+ #ifndef IGB_MASTER_SLAVE
+ /* Switch to override PHY master/slave setting */
+ #define IGB_MASTER_SLAVE e1000_ms_hw_default
+ #endif
+
+-#define IGB_MNG_VLAN_NONE -1
++#define IGB_MNG_VLAN_NONE -1
++
++#ifndef IGB_NO_LRO
++#define IGB_LRO_MAX 32 /*Maximum number of LRO descriptors*/
++struct igb_lro_stats {
++ u32 flushed;
++ u32 coal;
++};
++
++/*
++ * igb_lro_header - header format to be aggregated by LRO
++ * @iph: IP header without options
++ * @tcp: TCP header
++ * @ts: Optional TCP timestamp data in TCP options
++ *
++ * This structure relies on the check above that verifies that the header
++ * is IPv4 and does not contain any options.
++ */
++struct igb_lrohdr {
++ struct iphdr iph;
++ struct tcphdr th;
++ __be32 ts[0];
++};
++
++struct igb_lro_list {
++ struct sk_buff_head active;
++ struct igb_lro_stats stats;
++};
++
++#endif /* IGB_NO_LRO */
++struct igb_cb {
++#ifndef IGB_NO_LRO
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ union { /* Union defining head/tail partner */
++ struct sk_buff *head;
++ struct sk_buff *tail;
++ };
++#endif
++ __be32 tsecr; /* timestamp echo response */
++ u32 tsval; /* timestamp value in host order */
++ u32 next_seq; /* next expected sequence number */
++ u16 free; /* 65521 minus total size */
++ u16 mss; /* size of data portion of packet */
++ u16 append_cnt; /* number of skb's appended */
++#endif /* IGB_NO_LRO */
++#ifdef HAVE_VLAN_RX_REGISTER
++ u16 vid; /* VLAN tag */
++#endif
++};
++#define IGB_CB(skb) ((struct igb_cb *)(skb)->cb)
+
+ enum igb_tx_flags {
+ /* cmd_type flags */
+@@ -163,30 +271,28 @@
+ };
+
+ /* VLAN info */
+-#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
+-#define IGB_TX_FLAGS_VLAN_SHIFT 16
++#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
++#define IGB_TX_FLAGS_VLAN_SHIFT 16
+
+-/* The largest size we can write to the descriptor is 65535. In order to
++/*
++ * The largest size we can write to the descriptor is 65535. In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+-#define IGB_MAX_TXD_PWR 15
++#define IGB_MAX_TXD_PWR 15
+ #define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
+
+ /* Tx Descriptors needed, worst case */
+-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+-
+-/* EEPROM byte offsets */
+-#define IGB_SFF_8472_SWAP 0x5C
+-#define IGB_SFF_8472_COMP 0x5E
+-
+-/* Bitmasks */
+-#define IGB_SFF_ADDRESSING_MODE 0x4
+-#define IGB_SFF_8472_UNSUP 0x00
++#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
++#ifndef MAX_SKB_FRAGS
++#define DESC_NEEDED 4
++#elif (MAX_SKB_FRAGS < 16)
++#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
++#else
++#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
++#endif
+
+ /* wrapper around a pointer to a socket buffer,
+- * so a DMA handle can be stored along with the buffer
+- */
++ * so a DMA handle can be stored along with the buffer */
+ struct igb_tx_buffer {
+ union e1000_adv_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+@@ -202,15 +308,18 @@
+
+ struct igb_rx_buffer {
+ dma_addr_t dma;
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ struct sk_buff *skb;
++#else
+ struct page *page;
+- unsigned int page_offset;
++ u32 page_offset;
++#endif
+ };
+
+ struct igb_tx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 restart_queue;
+- u64 restart_queue2;
+ };
+
+ struct igb_rx_queue_stats {
+@@ -221,6 +330,18 @@
+ u64 alloc_failed;
+ };
+
++struct igb_rx_packet_stats {
++ u64 ipv4_packets; /* IPv4 headers processed */
++ u64 ipv4e_packets; /* IPv4E headers with extensions processed */
++ u64 ipv6_packets; /* IPv6 headers processed */
++ u64 ipv6e_packets; /* IPv6E headers with extensions processed */
++ u64 tcp_packets; /* TCP headers processed */
++ u64 udp_packets; /* UDP headers processed */
++ u64 sctp_packets; /* SCTP headers processed */
++ u64 nfs_packets; /* NFS headers processe */
++ u64 other_packets;
++};
++
+ struct igb_ring_container {
+ struct igb_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+@@ -231,22 +352,22 @@
+ };
+
+ struct igb_ring {
+- struct igb_q_vector *q_vector; /* backlink to q_vector */
+- struct net_device *netdev; /* back pointer to net_device */
+- struct device *dev; /* device pointer for dma mapping */
++ struct igb_q_vector *q_vector; /* backlink to q_vector */
++ struct net_device *netdev; /* back pointer to net_device */
++ struct device *dev; /* device for dma mapping */
+ union { /* array of buffer info structs */
+ struct igb_tx_buffer *tx_buffer_info;
+ struct igb_rx_buffer *rx_buffer_info;
+ };
+- void *desc; /* descriptor ring memory */
+- unsigned long flags; /* ring specific flags */
+- void __iomem *tail; /* pointer to ring tail register */
++ void *desc; /* descriptor ring memory */
++ unsigned long flags; /* ring specific flags */
++ void __iomem *tail; /* pointer to ring tail register */
+ dma_addr_t dma; /* phys address of the ring */
+- unsigned int size; /* length of desc. ring in bytes */
++ unsigned int size; /* length of desc. ring in bytes */
+
+- u16 count; /* number of desc. in the ring */
+- u8 queue_index; /* logical index of the ring*/
+- u8 reg_idx; /* physical index of the ring */
++ u16 count; /* number of desc. in the ring */
++ u8 queue_index; /* logical index of the ring*/
++ u8 reg_idx; /* physical index of the ring */
+
+ /* everything past this point are written often */
+ u16 next_to_clean;
+@@ -257,16 +378,22 @@
+ /* TX */
+ struct {
+ struct igb_tx_queue_stats tx_stats;
+- struct u64_stats_sync tx_syncp;
+- struct u64_stats_sync tx_syncp2;
+ };
+ /* RX */
+ struct {
+- struct sk_buff *skb;
+ struct igb_rx_queue_stats rx_stats;
+- struct u64_stats_sync rx_syncp;
++ struct igb_rx_packet_stats pkt_stats;
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ u16 rx_buffer_len;
++#else
++ struct sk_buff *skb;
++#endif
+ };
+ };
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++ struct net_device *vmdq_netdev;
++ int vqueue_index; /* queue index for virtual netdev */
++#endif
+ } ____cacheline_internodealigned_in_smp;
+
+ struct igb_q_vector {
+@@ -281,29 +408,57 @@
+ struct igb_ring_container rx, tx;
+
+ struct napi_struct napi;
++#ifndef IGB_NO_LRO
++ struct igb_lro_list lrolist; /* LRO list for queue vector*/
++#endif
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+ char name[IFNAMSIZ + 9];
++#ifndef HAVE_NETDEV_NAPI_LIST
++ struct net_device poll_dev;
++#endif
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
+ };
+
+ enum e1000_ring_flags_t {
++#if defined(HAVE_RHEL6_NET_DEVICE_OPS_EXT) || !defined(HAVE_NDO_SET_FEATURES)
++ IGB_RING_FLAG_RX_CSUM,
++#endif
+ IGB_RING_FLAG_RX_SCTP_CSUM,
+ IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
+ IGB_RING_FLAG_TX_CTX_IDX,
+- IGB_RING_FLAG_TX_DETECT_HANG
++ IGB_RING_FLAG_TX_DETECT_HANG,
+ };
+
++struct igb_mac_addr {
++ u8 addr[ETH_ALEN];
++ u16 queue;
++ u16 state; /* bitmask */
++};
++#define IGB_MAC_STATE_DEFAULT 0x1
++#define IGB_MAC_STATE_MODIFIED 0x2
++#define IGB_MAC_STATE_IN_USE 0x4
++
+ #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
+
+-#define IGB_RX_DESC(R, i) \
++#define IGB_RX_DESC(R, i) \
+ (&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
+-#define IGB_TX_DESC(R, i) \
++#define IGB_TX_DESC(R, i) \
+ (&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
+-#define IGB_TX_CTXTDESC(R, i) \
++#define IGB_TX_CTXTDESC(R, i) \
+ (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
+
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++#define netdev_ring(ring) \
++ ((ring->vmdq_netdev ? ring->vmdq_netdev : ring->netdev))
++#define ring_queue_index(ring) \
++ ((ring->vmdq_netdev ? ring->vqueue_index : ring->queue_index))
++#else
++#define netdev_ring(ring) (ring->netdev)
++#define ring_queue_index(ring) (ring->queue_index)
++#endif /* CONFIG_IGB_VMDQ_NETDEV */
++
+ /* igb_test_staterr - tests bits within Rx descriptor status and error fields */
+ static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
+ const u32 stat_err_bits)
+@@ -312,16 +467,27 @@
+ }
+
+ /* igb_desc_unused - calculate if we have unused descriptors */
+-static inline int igb_desc_unused(struct igb_ring *ring)
++static inline u16 igb_desc_unused(const struct igb_ring *ring)
+ {
+- if (ring->next_to_clean > ring->next_to_use)
+- return ring->next_to_clean - ring->next_to_use - 1;
++ u16 ntc = ring->next_to_clean;
++ u16 ntu = ring->next_to_use;
+
+- return ring->count + ring->next_to_clean - ring->next_to_use - 1;
++ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+ }
+
+-#ifdef CONFIG_IGB_HWMON
++#ifdef CONFIG_BQL
++static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
++{
++ return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
++}
++#endif /* CONFIG_BQL */
+
++struct igb_therm_proc_data {
++ struct e1000_hw *hw;
++ struct e1000_thermal_diode_data *sensor_data;
++};
++
++#ifdef IGB_HWMON
+ #define IGB_HWMON_TYPE_LOC 0
+ #define IGB_HWMON_TYPE_TEMP 1
+ #define IGB_HWMON_TYPE_CAUTION 2
+@@ -335,69 +501,79 @@
+ };
+
+ struct hwmon_buff {
+- struct attribute_group group;
+- const struct attribute_group *groups[2];
+- struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1];
+- struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4];
++ struct device *device;
++ struct hwmon_attr *hwmon_list;
+ unsigned int n_hwmon;
+ };
+-#endif
+-
++#endif /* IGB_HWMON */
++#ifdef ETHTOOL_GRXFHINDIR
+ #define IGB_RETA_SIZE 128
++#endif /* ETHTOOL_GRXFHINDIR */
+
+ /* board specific private data structure */
+ struct igb_adapter {
++#ifdef HAVE_VLAN_RX_REGISTER
++ /* vlgrp must be first member of structure */
++ struct vlan_group *vlgrp;
++#else
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+-
++#endif
+ struct net_device *netdev;
+
+ unsigned long state;
+ unsigned int flags;
+
+ unsigned int num_q_vectors;
+- struct msix_entry msix_entries[MAX_MSIX_ENTRIES];
++ struct msix_entry *msix_entries;
+
+- /* Interrupt Throttle Rate */
+- u32 rx_itr_setting;
+- u32 tx_itr_setting;
+- u16 tx_itr;
+- u16 rx_itr;
+
+ /* TX */
+ u16 tx_work_limit;
+ u32 tx_timeout_count;
+ int num_tx_queues;
+- struct igb_ring *tx_ring[16];
++ struct igb_ring *tx_ring[IGB_MAX_TX_QUEUES];
+
+ /* RX */
+ int num_rx_queues;
+- struct igb_ring *rx_ring[16];
+-
+- u32 max_frame_size;
+- u32 min_frame_size;
++ struct igb_ring *rx_ring[IGB_MAX_RX_QUEUES];
+
+ struct timer_list watchdog_timer;
++ struct timer_list dma_err_timer;
+ struct timer_list phy_info_timer;
+-
+ u16 mng_vlan_id;
+ u32 bd_number;
+ u32 wol;
+ u32 en_mng_pt;
+ u16 link_speed;
+ u16 link_duplex;
++ u8 port_num;
++
++ u8 __iomem *io_addr; /* for iounmap */
++
++ /* Interrupt Throttle Rate */
++ u32 rx_itr_setting;
++ u32 tx_itr_setting;
+
+ struct work_struct reset_task;
+ struct work_struct watchdog_task;
++ struct work_struct dma_err_task;
+ bool fc_autoneg;
+ u8 tx_timeout_factor;
+- struct timer_list blink_timer;
+- unsigned long led_status;
++
++#ifdef DEBUG
++ bool tx_hang_detected;
++ bool disable_hw_reset;
++#endif
++ u32 max_frame_size;
+
+ /* OS defined structs */
+ struct pci_dev *pdev;
+-
+- spinlock_t stats64_lock;
+- struct rtnl_link_stats64 stats64;
++#ifndef HAVE_NETDEV_STATS_IN_NETDEV
++ struct net_device_stats net_stats;
++#endif
++#ifndef IGB_NO_LRO
++ struct igb_lro_stats lro_stats;
++#endif
+
+ /* structs defined in e1000_hw.h */
+ struct e1000_hw hw;
+@@ -405,9 +581,11 @@
+ struct e1000_phy_info phy_info;
+ struct e1000_phy_stats phy_stats;
+
++#ifdef ETHTOOL_TEST
+ u32 test_icr;
+ struct igb_ring test_tx_ring;
+ struct igb_ring test_rx_ring;
++#endif
+
+ int msg_enable;
+
+@@ -416,15 +594,48 @@
+ u32 eims_other;
+
+ /* to not mess up cache alignment, always add to the bottom */
++ u32 *config_space;
+ u16 tx_ring_count;
+ u16 rx_ring_count;
+- unsigned int vfs_allocated_count;
+ struct vf_data_storage *vf_data;
++#ifdef IFLA_VF_MAX
+ int vf_rate_link_speed;
++#endif
++ u32 lli_port;
++ u32 lli_size;
++ unsigned int vfs_allocated_count;
++ /* Malicious Driver Detection flag. Valid only when SR-IOV is enabled */
++ bool mdd;
++ int int_mode;
+ u32 rss_queues;
++ u32 tss_queues;
++ u32 vmdq_pools;
++ char fw_version[32];
+ u32 wvbr;
++ struct igb_mac_addr *mac_table;
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++ struct net_device *vmdq_netdev[IGB_MAX_VMDQ_QUEUES];
++#endif
++ int vferr_refcount;
++ int dmac;
+ u32 *shadow_vfta;
+
++ /* External Thermal Sensor support flag */
++ bool ets;
++#ifdef IGB_HWMON
++ struct hwmon_buff igb_hwmon_buff;
++#else /* IGB_HWMON */
++#ifdef IGB_PROCFS
++ struct proc_dir_entry *eth_dir;
++ struct proc_dir_entry *info_dir;
++ struct proc_dir_entry *therm_dir[E1000_MAX_SENSORS];
++ struct igb_therm_proc_data therm_data[E1000_MAX_SENSORS];
++ bool old_lsc;
++#endif /* IGB_PROCFS */
++#endif /* IGB_HWMON */
++ u32 etrack_id;
++
++#ifdef HAVE_PTP_1588_CLOCK
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct delayed_work ptp_overflow_work;
+@@ -439,39 +650,57 @@
+ struct timecounter tc;
+ u32 tx_hwtstamp_timeouts;
+ u32 rx_hwtstamp_cleared;
++#endif /* HAVE_PTP_1588_CLOCK */
+
+- char fw_version[32];
+-#ifdef CONFIG_IGB_HWMON
+- struct hwmon_buff *igb_hwmon_buff;
+- bool ets;
+-#endif
++#ifdef HAVE_I2C_SUPPORT
+ struct i2c_algo_bit_data i2c_algo;
+ struct i2c_adapter i2c_adap;
+ struct i2c_client *i2c_client;
+- u32 rss_indir_tbl_init;
+- u8 rss_indir_tbl[IGB_RETA_SIZE];
+-
++#endif /* HAVE_I2C_SUPPORT */
+ unsigned long link_check_timeout;
++
++ int devrc;
++
+ int copper_tries;
+- struct e1000_info ei;
+ u16 eee_advert;
++#ifdef ETHTOOL_GRXFHINDIR
++ u32 rss_indir_tbl_init;
++ u8 rss_indir_tbl[IGB_RETA_SIZE];
++#endif
++};
++
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++struct igb_vmdq_adapter {
++#ifdef HAVE_VLAN_RX_REGISTER
++ /* vlgrp must be first member of structure */
++ struct vlan_group *vlgrp;
++#else
++ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
++#endif
++ struct igb_adapter *real_adapter;
++ struct net_device *vnetdev;
++ struct net_device_stats net_stats;
++ struct igb_ring *tx_ring;
++ struct igb_ring *rx_ring;
+ };
++#endif
+
+ #define IGB_FLAG_HAS_MSI (1 << 0)
+ #define IGB_FLAG_DCA_ENABLED (1 << 1)
+-#define IGB_FLAG_QUAD_PORT_A (1 << 2)
+-#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
+-#define IGB_FLAG_DMAC (1 << 4)
+-#define IGB_FLAG_PTP (1 << 5)
+-#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
+-#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
+-#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
+-#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
+-#define IGB_FLAG_MEDIA_RESET (1 << 10)
+-#define IGB_FLAG_MAS_CAPABLE (1 << 11)
+-#define IGB_FLAG_MAS_ENABLE (1 << 12)
+-#define IGB_FLAG_HAS_MSIX (1 << 13)
+-#define IGB_FLAG_EEE (1 << 14)
++#define IGB_FLAG_LLI_PUSH (1 << 2)
++#define IGB_FLAG_QUAD_PORT_A (1 << 3)
++#define IGB_FLAG_QUEUE_PAIRS (1 << 4)
++#define IGB_FLAG_EEE (1 << 5)
++#define IGB_FLAG_DMAC (1 << 6)
++#define IGB_FLAG_DETECT_BAD_DMA (1 << 7)
++#define IGB_FLAG_PTP (1 << 8)
++#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 9)
++#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 10)
++#define IGB_FLAG_WOL_SUPPORTED (1 << 11)
++#define IGB_FLAG_NEED_LINK_UPDATE (1 << 12)
++#define IGB_FLAG_LOOPBACK_ENABLE (1 << 13)
++#define IGB_FLAG_MEDIA_RESET (1 << 14)
++#define IGB_FLAG_MAS_ENABLE (1 << 15)
+
+ /* Media Auto Sense */
+ #define IGB_MAS_ENABLE_0 0X0001
+@@ -479,13 +708,63 @@
+ #define IGB_MAS_ENABLE_2 0X0004
+ #define IGB_MAS_ENABLE_3 0X0008
+
++#define IGB_MIN_TXPBSIZE 20408
++#define IGB_TX_BUF_4096 4096
++
++#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
++
+ /* DMA Coalescing defines */
+-#define IGB_MIN_TXPBSIZE 20408
+-#define IGB_TX_BUF_4096 4096
+-#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
++#define IGB_DMAC_DISABLE 0
++#define IGB_DMAC_MIN 250
++#define IGB_DMAC_500 500
++#define IGB_DMAC_EN_DEFAULT 1000
++#define IGB_DMAC_2000 2000
++#define IGB_DMAC_3000 3000
++#define IGB_DMAC_4000 4000
++#define IGB_DMAC_5000 5000
++#define IGB_DMAC_6000 6000
++#define IGB_DMAC_7000 7000
++#define IGB_DMAC_8000 8000
++#define IGB_DMAC_9000 9000
++#define IGB_DMAC_MAX 10000
++
++#define IGB_82576_TSYNC_SHIFT 19
++#define IGB_82580_TSYNC_SHIFT 24
++#define IGB_TS_HDR_LEN 16
++
++/* CEM Support */
++#define FW_HDR_LEN 0x4
++#define FW_CMD_DRV_INFO 0xDD
++#define FW_CMD_DRV_INFO_LEN 0x5
++#define FW_CMD_RESERVED 0X0
++#define FW_RESP_SUCCESS 0x1
++#define FW_UNUSED_VER 0x0
++#define FW_MAX_RETRIES 3
++#define FW_STATUS_SUCCESS 0x1
++#define FW_FAMILY_DRV_VER 0Xffffffff
++
++#define IGB_MAX_LINK_TRIES 20
++
++struct e1000_fw_hdr {
++ u8 cmd;
++ u8 buf_len;
++ union {
++ u8 cmd_resv;
++ u8 ret_status;
++ } cmd_or_resp;
++ u8 checksum;
++};
++
++#pragma pack(push, 1)
++struct e1000_fw_drv_info {
++ struct e1000_fw_hdr hdr;
++ u8 port_num;
++ u32 drv_version;
++ u16 pad; /* end spacing to ensure length is mult. of dword */
++ u8 pad2; /* end spacing to ensure length is mult. of dword2 */
++};
++#pragma pack(pop)
+
+-#define IGB_82576_TSYNC_SHIFT 19
+-#define IGB_TS_HDR_LEN 16
+ enum e1000_state_t {
+ __IGB_TESTING,
+ __IGB_RESETTING,
+@@ -493,85 +772,82 @@
+ __IGB_PTP_TX_IN_PROGRESS,
+ };
+
+-enum igb_boards {
+- board_82575,
+-};
+-
+ extern char igb_driver_name[];
+ extern char igb_driver_version[];
+
+-int igb_up(struct igb_adapter *);
+-void igb_down(struct igb_adapter *);
+-void igb_reinit_locked(struct igb_adapter *);
+-void igb_reset(struct igb_adapter *);
+-int igb_reinit_queues(struct igb_adapter *);
+-void igb_write_rss_indir_tbl(struct igb_adapter *);
+-int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
+-int igb_setup_tx_resources(struct igb_ring *);
+-int igb_setup_rx_resources(struct igb_ring *);
+-void igb_free_tx_resources(struct igb_ring *);
+-void igb_free_rx_resources(struct igb_ring *);
+-void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+-void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+-void igb_setup_tctl(struct igb_adapter *);
+-void igb_setup_rctl(struct igb_adapter *);
+-netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+-void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
+-void igb_alloc_rx_buffers(struct igb_ring *, u16);
+-void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
+-bool igb_has_link(struct igb_adapter *adapter);
+-void igb_set_ethtool_ops(struct net_device *);
+-void igb_power_up_link(struct igb_adapter *);
+-void igb_set_fw_version(struct igb_adapter *);
+-void igb_ptp_init(struct igb_adapter *adapter);
+-void igb_ptp_stop(struct igb_adapter *adapter);
+-void igb_ptp_reset(struct igb_adapter *adapter);
+-void igb_ptp_rx_hang(struct igb_adapter *adapter);
+-void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
+-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+- struct sk_buff *skb);
++extern int igb_open(struct net_device *netdev);
++extern int igb_close(struct net_device *netdev);
++extern int igb_up(struct igb_adapter *);
++extern void igb_down(struct igb_adapter *);
++extern void igb_reinit_locked(struct igb_adapter *);
++extern void igb_reset(struct igb_adapter *);
++extern int igb_reinit_queues(struct igb_adapter *);
++#ifdef ETHTOOL_SRXFHINDIR
++extern void igb_write_rss_indir_tbl(struct igb_adapter *);
++#endif
++extern int igb_set_spd_dplx(struct igb_adapter *, u16);
++extern int igb_setup_tx_resources(struct igb_ring *);
++extern int igb_setup_rx_resources(struct igb_ring *);
++extern void igb_free_tx_resources(struct igb_ring *);
++extern void igb_free_rx_resources(struct igb_ring *);
++extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
++extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
++extern void igb_setup_tctl(struct igb_adapter *);
++extern void igb_setup_rctl(struct igb_adapter *);
++extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
++extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
++ struct igb_tx_buffer *);
++extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
++extern void igb_clean_rx_ring(struct igb_ring *);
++extern int igb_setup_queues(struct igb_adapter *adapter);
++extern void igb_update_stats(struct igb_adapter *);
++extern bool igb_has_link(struct igb_adapter *adapter);
++extern void igb_set_ethtool_ops(struct net_device *);
++extern void igb_check_options(struct igb_adapter *);
++extern void igb_power_up_link(struct igb_adapter *);
++#ifdef HAVE_PTP_1588_CLOCK
++extern void igb_ptp_init(struct igb_adapter *adapter);
++extern void igb_ptp_stop(struct igb_adapter *adapter);
++extern void igb_ptp_reset(struct igb_adapter *adapter);
++extern void igb_ptp_tx_work(struct work_struct *work);
++extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
++extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
++extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
++ struct sk_buff *skb);
++extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
++ unsigned char *va,
++ struct sk_buff *skb);
++extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
++ struct ifreq *ifr, int cmd);
++#endif /* HAVE_PTP_1588_CLOCK */
++#ifdef ETHTOOL_OPS_COMPAT
++extern int ethtool_ioctl(struct ifreq *);
++#endif
++extern int igb_write_mc_addr_list(struct net_device *netdev);
++extern int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue);
++extern int igb_del_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue);
++extern int igb_available_rars(struct igb_adapter *adapter);
++extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32);
++extern void igb_configure_vt_default_pool(struct igb_adapter *adapter);
++extern void igb_enable_vlan_tags(struct igb_adapter *adapter);
++#ifndef HAVE_VLAN_RX_REGISTER
++extern void igb_vlan_mode(struct net_device *, u32);
++#endif
++
++#define E1000_PCS_CFG_IGN_SD 1
++
+ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+-void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
+-#ifdef CONFIG_IGB_HWMON
++#ifdef IGB_HWMON
+ void igb_sysfs_exit(struct igb_adapter *adapter);
+ int igb_sysfs_init(struct igb_adapter *adapter);
+-#endif
+-static inline s32 igb_reset_phy(struct e1000_hw *hw)
+-{
+- if (hw->phy.ops.reset)
+- return hw->phy.ops.reset(hw);
+-
+- return 0;
+-}
+-
+-static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+-{
+- if (hw->phy.ops.read_reg)
+- return hw->phy.ops.read_reg(hw, offset, data);
+-
+- return 0;
+-}
+-
+-static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+-{
+- if (hw->phy.ops.write_reg)
+- return hw->phy.ops.write_reg(hw, offset, data);
+-
+- return 0;
+-}
+-
+-static inline s32 igb_get_phy_info(struct e1000_hw *hw)
+-{
+- if (hw->phy.ops.get_phy_info)
+- return hw->phy.ops.get_phy_info(hw);
+-
+- return 0;
+-}
+-
+-static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
+-{
+- return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+-}
++#else
++#ifdef IGB_PROCFS
++int igb_procfs_init(struct igb_adapter *adapter);
++void igb_procfs_exit(struct igb_adapter *adapter);
++int igb_procfs_topdir_init(void);
++void igb_procfs_topdir_exit(void);
++#endif /* IGB_PROCFS */
++#endif /* IGB_HWMON */
+
+ #endif /* _IGB_H_ */
+diff -Nu a/drivers/net/ethernet/intel/igb/igb_debugfs.c b/drivers/net/ethernet/intel/igb/igb_debugfs.c
+--- a/drivers/net/ethernet/intel/igb/igb_debugfs.c 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/igb_debugfs.c 2016-11-14 14:32:08.579567168 +0000
+@@ -0,0 +1,26 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include "igb.h"
++
+diff -Nu a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c 2016-11-14 14:32:08.579567168 +0000
+@@ -1,43 +1,50 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
+
+ /* ethtool support for igb */
+
+-#include
+ #include
+-#include
+-#include
+-#include
+-#include
++#include
++
++#ifdef SIOCETHTOOL
+ #include
+-#include
+-#include
++#ifdef CONFIG_PM_RUNTIME
+ #include
++#endif /* CONFIG_PM_RUNTIME */
+ #include
+-#include
+
+ #include "igb.h"
++#include "igb_regtest.h"
++#include
++#ifdef ETHTOOL_GEEE
++#include
++#endif
+
++#ifdef ETHTOOL_OPS_COMPAT
++#include "kcompat_ethtool.c"
++#endif
++#ifdef ETHTOOL_GSTATS
+ struct igb_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+@@ -49,6 +56,7 @@
+ .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
+ .stat_offset = offsetof(struct igb_adapter, _stat) \
+ }
++
+ static const struct igb_stats igb_gstrings_stats[] = {
+ IGB_STAT("rx_packets", stats.gprc),
+ IGB_STAT("tx_packets", stats.gptc),
+@@ -82,6 +90,10 @@
+ IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
+ IGB_STAT("rx_long_byte_count", stats.gorc),
+ IGB_STAT("tx_dma_out_of_sync", stats.doosync),
++#ifndef IGB_NO_LRO
++ IGB_STAT("lro_aggregated", lro_stats.coal),
++ IGB_STAT("lro_flushed", lro_stats.flushed),
++#endif /* IGB_LRO */
+ IGB_STAT("tx_smbus", stats.mgptc),
+ IGB_STAT("rx_smbus", stats.mgprc),
+ IGB_STAT("dropped_smbus", stats.mgpdc),
+@@ -89,15 +101,18 @@
+ IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+ IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
+ IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
++#ifdef HAVE_PTP_1588_CLOCK
+ IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
++#endif /* HAVE_PTP_1588_CLOCK */
+ };
+
+ #define IGB_NETDEV_STAT(_net_stat) { \
+- .stat_string = __stringify(_net_stat), \
+- .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
+- .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
++ .stat_string = #_net_stat, \
++ .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
++ .stat_offset = offsetof(struct net_device_stats, _net_stat) \
+ }
++
+ static const struct igb_stats igb_gstrings_net_stats[] = {
+ IGB_NETDEV_STAT(rx_errors),
+ IGB_NETDEV_STAT(tx_errors),
+@@ -110,15 +125,12 @@
+ IGB_NETDEV_STAT(tx_heartbeat_errors)
+ };
+
+-#define IGB_GLOBAL_STATS_LEN \
+- (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
+-#define IGB_NETDEV_STATS_LEN \
+- (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
++#define IGB_GLOBAL_STATS_LEN ARRAY_SIZE(igb_gstrings_stats)
++#define IGB_NETDEV_STATS_LEN ARRAY_SIZE(igb_gstrings_net_stats)
+ #define IGB_RX_QUEUE_STATS_LEN \
+ (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
+-
+-#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
+-
++#define IGB_TX_QUEUE_STATS_LEN \
++ (sizeof(struct igb_tx_queue_stats) / sizeof(u64))
+ #define IGB_QUEUE_STATS_LEN \
+ ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
+ IGB_RX_QUEUE_STATS_LEN) + \
+@@ -127,23 +139,23 @@
+ #define IGB_STATS_LEN \
+ (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
+
++#endif /* ETHTOOL_GSTATS */
++#ifdef ETHTOOL_TEST
+ static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+ };
++
+ #define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
++#endif /* ETHTOOL_TEST */
+
+ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+- struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+- struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
+ u32 status;
+- u32 speed;
+
+- status = rd32(E1000_STATUS);
+ if (hw->phy.media_type == e1000_media_type_copper) {
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+@@ -165,80 +177,85 @@
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = hw->phy.addr;
+ ecmd->transceiver = XCVR_INTERNAL;
++
+ } else {
+- ecmd->supported = (SUPPORTED_FIBRE |
+- SUPPORTED_1000baseKX_Full |
++ ecmd->supported = (SUPPORTED_1000baseT_Full |
++ SUPPORTED_100baseT_Full |
++ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause);
+- ecmd->advertising = (ADVERTISED_FIBRE |
+- ADVERTISED_1000baseKX_Full);
+- if (hw->mac.type == e1000_i354) {
+- if ((hw->device_id ==
+- E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) &&
+- !(status & E1000_STATUS_2P5_SKU_OVER)) {
+- ecmd->supported |= SUPPORTED_2500baseX_Full;
+- ecmd->supported &=
+- ~SUPPORTED_1000baseKX_Full;
+- ecmd->advertising |= ADVERTISED_2500baseX_Full;
+- ecmd->advertising &=
+- ~ADVERTISED_1000baseKX_Full;
+- }
+- }
+- if (eth_flags->e100_base_fx) {
+- ecmd->supported |= SUPPORTED_100baseT_Full;
+- ecmd->advertising |= ADVERTISED_100baseT_Full;
++ if (hw->mac.type == e1000_i354)
++ ecmd->supported |= (SUPPORTED_2500baseX_Full);
++
++ ecmd->advertising = ADVERTISED_FIBRE;
++
++ switch (adapter->link_speed) {
++ case SPEED_2500:
++ ecmd->advertising = ADVERTISED_2500baseX_Full;
++ break;
++ case SPEED_1000:
++ ecmd->advertising = ADVERTISED_1000baseT_Full;
++ break;
++ case SPEED_100:
++ ecmd->advertising = ADVERTISED_100baseT_Full;
++ break;
++ default:
++ break;
+ }
++
+ if (hw->mac.autoneg == 1)
+ ecmd->advertising |= ADVERTISED_Autoneg;
+
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ }
++
+ if (hw->mac.autoneg != 1)
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+
+- switch (hw->fc.requested_mode) {
+- case e1000_fc_full:
++ if (hw->fc.requested_mode == e1000_fc_full)
+ ecmd->advertising |= ADVERTISED_Pause;
+- break;
+- case e1000_fc_rx_pause:
++ else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+ ecmd->advertising |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+- break;
+- case e1000_fc_tx_pause:
++ else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+- break;
+- default:
++ else
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+- }
++
++ status = E1000_READ_REG(hw, E1000_STATUS);
++
+ if (status & E1000_STATUS_LU) {
+- if ((status & E1000_STATUS_2P5_SKU) &&
+- !(status & E1000_STATUS_2P5_SKU_OVER)) {
+- speed = SPEED_2500;
+- } else if (status & E1000_STATUS_SPEED_1000) {
+- speed = SPEED_1000;
+- } else if (status & E1000_STATUS_SPEED_100) {
+- speed = SPEED_100;
+- } else {
+- speed = SPEED_10;
+- }
++ if ((hw->mac.type == e1000_i354) &&
++ (status & E1000_STATUS_2P5_SKU) &&
++ !(status & E1000_STATUS_2P5_SKU_OVER))
++ ethtool_cmd_speed_set(ecmd, SPEED_2500);
++ else if (status & E1000_STATUS_SPEED_1000)
++ ethtool_cmd_speed_set(ecmd, SPEED_1000);
++ else if (status & E1000_STATUS_SPEED_100)
++ ethtool_cmd_speed_set(ecmd, SPEED_100);
++ else
++ ethtool_cmd_speed_set(ecmd, SPEED_10);
++
+ if ((status & E1000_STATUS_FD) ||
+ hw->phy.media_type != e1000_media_type_copper)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
++
+ } else {
+- speed = SPEED_UNKNOWN;
+- ecmd->duplex = DUPLEX_UNKNOWN;
++ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
++ ecmd->duplex = -1;
+ }
+- ethtool_cmd_speed_set(ecmd, speed);
++
+ if ((hw->phy.media_type == e1000_media_type_fiber) ||
+ hw->mac.autoneg)
+ ecmd->autoneg = AUTONEG_ENABLE;
+ else
+ ecmd->autoneg = AUTONEG_DISABLE;
++#ifdef ETH_TP_MDI_X
+
+ /* MDI-X => 2; MDI =>1; Invalid =>0 */
+ if (hw->phy.media_type == e1000_media_type_copper)
+@@ -247,11 +264,14 @@
+ else
+ ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
++#ifdef ETH_TP_MDI_AUTO
+ if (hw->phy.mdix == AUTO_ALL_MODES)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ else
+ ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
++#endif
++#endif /* ETH_TP_MDI_X */
+ return 0;
+ }
+
+@@ -260,16 +280,26 @@
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
++ if (ecmd->duplex == DUPLEX_HALF) {
++ if (!hw->dev_spec._82575.eee_disable)
++ dev_info(pci_dev_to_dev(adapter->pdev), "EEE disabled: not supported with half duplex\n");
++ hw->dev_spec._82575.eee_disable = true;
++ } else {
++ if (hw->dev_spec._82575.eee_disable)
++ dev_info(pci_dev_to_dev(adapter->pdev), "EEE enabled\n");
++ hw->dev_spec._82575.eee_disable = false;
++ }
++
+ /* When SoL/IDER sessions are active, autoneg/speed/duplex
+- * cannot be changed
+- */
+- if (igb_check_reset_block(hw)) {
+- dev_err(&adapter->pdev->dev,
+- "Cannot change link characteristics when SoL/IDER is active.\n");
++ * cannot be changed */
++ if (e1000_check_reset_block(hw)) {
++ dev_err(pci_dev_to_dev(adapter->pdev), "Cannot change link characteristics when SoL/IDER is active.\n");
+ return -EINVAL;
+ }
+
+- /* MDI setting is only allowed when autoneg enabled because
++#ifdef ETH_TP_MDI_AUTO
++ /*
++ * MDI setting is only allowed when autoneg enabled because
+ * some hardware doesn't allow MDI setting when speed or
+ * duplex is forced.
+ */
+@@ -284,6 +314,7 @@
+ }
+ }
+
++#endif /* ETH_TP_MDI_AUTO */
+ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+@@ -318,14 +349,13 @@
+ if (adapter->fc_autoneg)
+ hw->fc.requested_mode = e1000_fc_default;
+ } else {
+- u32 speed = ethtool_cmd_speed(ecmd);
+- /* calling this overrides forced MDI setting */
+- if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
++ if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+ clear_bit(__IGB_RESETTING, &adapter->state);
+ return -EINVAL;
+ }
+ }
+
++#ifdef ETH_TP_MDI_AUTO
+ /* MDI-X => 2; MDI => 1; Auto => 3 */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ /* fix up the value for auto (3 => 0) as zero is mapped
+@@ -337,6 +367,7 @@
+ hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+ }
+
++#endif /* ETH_TP_MDI_AUTO */
+ /* reset the link */
+ if (netif_running(adapter->netdev)) {
+ igb_down(adapter);
+@@ -353,7 +384,8 @@
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+
+- /* If the link is not reported up to netdev, interrupts are disabled,
++ /*
++ * If the link is not reported up to netdev, interrupts are disabled,
+ * and so the physical link state may have changed since we last
+ * looked. Set get_link_status to make sure that the true link
+ * state is interrogated, rather than pulling a cached and possibly
+@@ -391,10 +423,6 @@
+ struct e1000_hw *hw = &adapter->hw;
+ int retval = 0;
+
+- /* 100basefx does not support setting link flow control */
+- if (hw->dev_spec._82575.eth_flags.e100_base_fx)
+- return -EINVAL;
+-
+ adapter->fc_autoneg = pause->autoneg;
+
+ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+@@ -420,10 +448,18 @@
+
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+- retval = ((hw->phy.media_type == e1000_media_type_copper) ?
+- igb_force_mac_fc(hw) : igb_setup_link(hw));
++ if (hw->phy.media_type == e1000_media_type_fiber) {
++ retval = hw->mac.ops.setup_link(hw);
++ /* implicit goto out */
++ } else {
++ retval = igb_e1000_force_mac_fc(hw);
++ if (retval)
++ goto out;
++ e1000_set_fc_watermarks_generic(hw);
++ }
+ }
+
++out:
+ clear_bit(__IGB_RESETTING, &adapter->state);
+ return retval;
+ }
+@@ -442,7 +478,7 @@
+
+ static int igb_get_regs_len(struct net_device *netdev)
+ {
+-#define IGB_REGS_LEN 739
++#define IGB_REGS_LEN 555
+ return IGB_REGS_LEN * sizeof(u32);
+ }
+
+@@ -459,80 +495,78 @@
+ regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+ /* General Registers */
+- regs_buff[0] = rd32(E1000_CTRL);
+- regs_buff[1] = rd32(E1000_STATUS);
+- regs_buff[2] = rd32(E1000_CTRL_EXT);
+- regs_buff[3] = rd32(E1000_MDIC);
+- regs_buff[4] = rd32(E1000_SCTL);
+- regs_buff[5] = rd32(E1000_CONNSW);
+- regs_buff[6] = rd32(E1000_VET);
+- regs_buff[7] = rd32(E1000_LEDCTL);
+- regs_buff[8] = rd32(E1000_PBA);
+- regs_buff[9] = rd32(E1000_PBS);
+- regs_buff[10] = rd32(E1000_FRTIMER);
+- regs_buff[11] = rd32(E1000_TCPTIMER);
++ regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
++ regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
++ regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC);
++ regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL);
++ regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW);
++ regs_buff[6] = E1000_READ_REG(hw, E1000_VET);
++ regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL);
++ regs_buff[8] = E1000_READ_REG(hw, E1000_PBA);
++ regs_buff[9] = E1000_READ_REG(hw, E1000_PBS);
++ regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER);
++ regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER);
+
+ /* NVM Register */
+- regs_buff[12] = rd32(E1000_EECD);
++ regs_buff[12] = E1000_READ_REG(hw, E1000_EECD);
+
+ /* Interrupt */
+ /* Reading EICS for EICR because they read the
+- * same but EICS does not clear on read
+- */
+- regs_buff[13] = rd32(E1000_EICS);
+- regs_buff[14] = rd32(E1000_EICS);
+- regs_buff[15] = rd32(E1000_EIMS);
+- regs_buff[16] = rd32(E1000_EIMC);
+- regs_buff[17] = rd32(E1000_EIAC);
+- regs_buff[18] = rd32(E1000_EIAM);
++ * same but EICS does not clear on read */
++ regs_buff[13] = E1000_READ_REG(hw, E1000_EICS);
++ regs_buff[14] = E1000_READ_REG(hw, E1000_EICS);
++ regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS);
++ regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC);
++ regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC);
++ regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM);
+ /* Reading ICS for ICR because they read the
+- * same but ICS does not clear on read
+- */
+- regs_buff[19] = rd32(E1000_ICS);
+- regs_buff[20] = rd32(E1000_ICS);
+- regs_buff[21] = rd32(E1000_IMS);
+- regs_buff[22] = rd32(E1000_IMC);
+- regs_buff[23] = rd32(E1000_IAC);
+- regs_buff[24] = rd32(E1000_IAM);
+- regs_buff[25] = rd32(E1000_IMIRVP);
++ * same but ICS does not clear on read */
++ regs_buff[19] = E1000_READ_REG(hw, E1000_ICS);
++ regs_buff[20] = E1000_READ_REG(hw, E1000_ICS);
++ regs_buff[21] = E1000_READ_REG(hw, E1000_IMS);
++ regs_buff[22] = E1000_READ_REG(hw, E1000_IMC);
++ regs_buff[23] = E1000_READ_REG(hw, E1000_IAC);
++ regs_buff[24] = E1000_READ_REG(hw, E1000_IAM);
++ regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP);
+
+ /* Flow Control */
+- regs_buff[26] = rd32(E1000_FCAL);
+- regs_buff[27] = rd32(E1000_FCAH);
+- regs_buff[28] = rd32(E1000_FCTTV);
+- regs_buff[29] = rd32(E1000_FCRTL);
+- regs_buff[30] = rd32(E1000_FCRTH);
+- regs_buff[31] = rd32(E1000_FCRTV);
++ regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL);
++ regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH);
++ regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV);
++ regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL);
++ regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH);
++ regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV);
+
+ /* Receive */
+- regs_buff[32] = rd32(E1000_RCTL);
+- regs_buff[33] = rd32(E1000_RXCSUM);
+- regs_buff[34] = rd32(E1000_RLPML);
+- regs_buff[35] = rd32(E1000_RFCTL);
+- regs_buff[36] = rd32(E1000_MRQC);
+- regs_buff[37] = rd32(E1000_VT_CTL);
++ regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL);
++ regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM);
++ regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML);
++ regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL);
++ regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC);
++ regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL);
+
+ /* Transmit */
+- regs_buff[38] = rd32(E1000_TCTL);
+- regs_buff[39] = rd32(E1000_TCTL_EXT);
+- regs_buff[40] = rd32(E1000_TIPG);
+- regs_buff[41] = rd32(E1000_DTXCTL);
++ regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL);
++ regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT);
++ regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG);
++ regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL);
+
+ /* Wake Up */
+- regs_buff[42] = rd32(E1000_WUC);
+- regs_buff[43] = rd32(E1000_WUFC);
+- regs_buff[44] = rd32(E1000_WUS);
+- regs_buff[45] = rd32(E1000_IPAV);
+- regs_buff[46] = rd32(E1000_WUPL);
++ regs_buff[42] = E1000_READ_REG(hw, E1000_WUC);
++ regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC);
++ regs_buff[44] = E1000_READ_REG(hw, E1000_WUS);
++ regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV);
++ regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL);
+
+ /* MAC */
+- regs_buff[47] = rd32(E1000_PCS_CFG0);
+- regs_buff[48] = rd32(E1000_PCS_LCTL);
+- regs_buff[49] = rd32(E1000_PCS_LSTAT);
+- regs_buff[50] = rd32(E1000_PCS_ANADV);
+- regs_buff[51] = rd32(E1000_PCS_LPAB);
+- regs_buff[52] = rd32(E1000_PCS_NPTX);
+- regs_buff[53] = rd32(E1000_PCS_LPABNP);
++ regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0);
++ regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL);
++ regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT);
++ regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV);
++ regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB);
++ regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX);
++ regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP);
+
+ /* Statistics */
+ regs_buff[54] = adapter->stats.crcerrs;
+@@ -598,112 +632,75 @@
+ regs_buff[120] = adapter->stats.hrmpc;
+
+ for (i = 0; i < 4; i++)
+- regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
++ regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
++ regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[129 + i] = rd32(E1000_RDBAL(i));
++ regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[133 + i] = rd32(E1000_RDBAH(i));
++ regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[137 + i] = rd32(E1000_RDLEN(i));
++ regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[141 + i] = rd32(E1000_RDH(i));
++ regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[145 + i] = rd32(E1000_RDT(i));
++ regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[149 + i] = rd32(E1000_RXDCTL(i));
++ regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
+
+ for (i = 0; i < 10; i++)
+- regs_buff[153 + i] = rd32(E1000_EITR(i));
++ regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i));
+ for (i = 0; i < 8; i++)
+- regs_buff[163 + i] = rd32(E1000_IMIR(i));
++ regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i));
+ for (i = 0; i < 8; i++)
+- regs_buff[171 + i] = rd32(E1000_IMIREXT(i));
++ regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i));
+ for (i = 0; i < 16; i++)
+- regs_buff[179 + i] = rd32(E1000_RAL(i));
++ regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i));
+ for (i = 0; i < 16; i++)
+- regs_buff[195 + i] = rd32(E1000_RAH(i));
++ regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i));
+
+ for (i = 0; i < 4; i++)
+- regs_buff[211 + i] = rd32(E1000_TDBAL(i));
++ regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[215 + i] = rd32(E1000_TDBAH(i));
++ regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[219 + i] = rd32(E1000_TDLEN(i));
++ regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[223 + i] = rd32(E1000_TDH(i));
++ regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[227 + i] = rd32(E1000_TDT(i));
++ regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[231 + i] = rd32(E1000_TXDCTL(i));
++ regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[235 + i] = rd32(E1000_TDWBAL(i));
++ regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[239 + i] = rd32(E1000_TDWBAH(i));
++ regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i));
++ regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
+
+ for (i = 0; i < 4; i++)
+- regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i));
++ regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i));
++ regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i));
+ for (i = 0; i < 32; i++)
+- regs_buff[255 + i] = rd32(E1000_WUPM_REG(i));
++ regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i));
+ for (i = 0; i < 128; i++)
+- regs_buff[287 + i] = rd32(E1000_FFMT_REG(i));
++ regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i));
+ for (i = 0; i < 128; i++)
+- regs_buff[415 + i] = rd32(E1000_FFVT_REG(i));
++ regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i));
+ for (i = 0; i < 4; i++)
+- regs_buff[543 + i] = rd32(E1000_FFLT_REG(i));
+-
+- regs_buff[547] = rd32(E1000_TDFH);
+- regs_buff[548] = rd32(E1000_TDFT);
+- regs_buff[549] = rd32(E1000_TDFHS);
+- regs_buff[550] = rd32(E1000_TDFPC);
++ regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i));
+
++ regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH);
++ regs_buff[548] = E1000_READ_REG(hw, E1000_TDFT);
++ regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS);
++ regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC);
+ if (hw->mac.type > e1000_82580) {
+ regs_buff[551] = adapter->stats.o2bgptc;
+ regs_buff[552] = adapter->stats.b2ospc;
+ regs_buff[553] = adapter->stats.o2bspc;
+ regs_buff[554] = adapter->stats.b2ogprc;
+ }
+-
+- if (hw->mac.type != e1000_82576)
+- return;
+- for (i = 0; i < 12; i++)
+- regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4));
+- for (i = 0; i < 4; i++)
+- regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4));
+- for (i = 0; i < 12; i++)
+- regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4));
+- for (i = 0; i < 12; i++)
+- regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4));
+- for (i = 0; i < 12; i++)
+- regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4));
+- for (i = 0; i < 12; i++)
+- regs_buff[607 + i] = rd32(E1000_RDH(i + 4));
+- for (i = 0; i < 12; i++)
+- regs_buff[619 + i] = rd32(E1000_RDT(i + 4));
+- for (i = 0; i < 12; i++)
+- regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4));
+-
+- for (i = 0; i < 12; i++)
+- regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4));
+- for (i = 0; i < 12; i++)
+- regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4));
+- for (i = 0; i < 12; i++)
+- regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4));
+- for (i = 0; i < 12; i++)
+- regs_buff[679 + i] = rd32(E1000_TDH(i + 4));
+- for (i = 0; i < 12; i++)
+- regs_buff[691 + i] = rd32(E1000_TDT(i + 4));
+- for (i = 0; i < 12; i++)
+- regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4));
+- for (i = 0; i < 12; i++)
+- regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4));
+- for (i = 0; i < 12; i++)
+- regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4));
+ }
+
+ static int igb_get_eeprom_len(struct net_device *netdev)
+@@ -736,13 +733,13 @@
+ return -ENOMEM;
+
+ if (hw->nvm.type == e1000_nvm_eeprom_spi)
+- ret_val = hw->nvm.ops.read(hw, first_word,
+- last_word - first_word + 1,
+- eeprom_buff);
++ ret_val = e1000_read_nvm(hw, first_word,
++ last_word - first_word + 1,
++ eeprom_buff);
+ else {
+ for (i = 0; i < last_word - first_word + 1; i++) {
+- ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
+- &eeprom_buff[i]);
++ ret_val = e1000_read_nvm(hw, first_word + i, 1,
++ &eeprom_buff[i]);
+ if (ret_val)
+ break;
+ }
+@@ -750,7 +747,7 @@
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+- le16_to_cpus(&eeprom_buff[i]);
++ eeprom_buff[i] = le16_to_cpu(eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
+ eeprom->len);
+@@ -772,11 +769,6 @@
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+- if ((hw->mac.type >= e1000_i210) &&
+- !igb_get_flash_presence_i210(hw)) {
+- return -EOPNOTSUPP;
+- }
+-
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EFAULT;
+
+@@ -791,19 +783,17 @@
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+- /* need read/modify/write of first changed EEPROM word
+- * only the second byte of the word is being modified
+- */
+- ret_val = hw->nvm.ops.read(hw, first_word, 1,
++ /* need read/modify/write of first changed EEPROM word */
++ /* only the second byte of the word is being modified */
++ ret_val = e1000_read_nvm(hw, first_word, 1,
+ &eeprom_buff[0]);
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+- /* need read/modify/write of last changed EEPROM word
+- * only the first byte of the word is being modified
+- */
+- ret_val = hw->nvm.ops.read(hw, last_word, 1,
+- &eeprom_buff[last_word - first_word]);
++ /* need read/modify/write of last changed EEPROM word */
++ /* only the first byte of the word is being modified */
++ ret_val = e1000_read_nvm(hw, last_word, 1,
++ &eeprom_buff[last_word - first_word]);
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+@@ -813,16 +803,16 @@
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
++ cpu_to_le16s(&eeprom_buff[i]);
+
+- ret_val = hw->nvm.ops.write(hw, first_word,
+- last_word - first_word + 1, eeprom_buff);
++ ret_val = e1000_write_nvm(hw, first_word,
++ last_word - first_word + 1, eeprom_buff);
+
+- /* Update the checksum if nvm write succeeded */
++ /* Update the checksum if write succeeded.
++ * and flush shadow RAM for 82573 controllers */
+ if (ret_val == 0)
+- hw->nvm.ops.update(hw);
++ e1000_update_nvm_checksum(hw);
+
+- igb_set_fw_version(adapter);
+ kfree(eeprom_buff);
+ return ret_val;
+ }
+@@ -832,16 +822,14 @@
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+- strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
+- strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
+-
+- /* EEPROM image version # is reported as firmware version # for
+- * 82575 controllers
+- */
+- strlcpy(drvinfo->fw_version, adapter->fw_version,
+- sizeof(drvinfo->fw_version));
+- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+- sizeof(drvinfo->bus_info));
++ strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
++ strncpy(drvinfo->version, igb_driver_version,
++ sizeof(drvinfo->version) - 1);
++
++ strncpy(drvinfo->fw_version, adapter->fw_version,
++ sizeof(drvinfo->fw_version) - 1);
++ strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
++ sizeof(drvinfo->bus_info) - 1);
+ drvinfo->n_stats = IGB_STATS_LEN;
+ drvinfo->testinfo_len = IGB_TEST_LEN;
+ drvinfo->regdump_len = igb_get_regs_len(netdev);
+@@ -855,8 +843,12 @@
+
+ ring->rx_max_pending = IGB_MAX_RXD;
+ ring->tx_max_pending = IGB_MAX_TXD;
++ ring->rx_mini_max_pending = 0;
++ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = adapter->rx_ring_count;
+ ring->tx_pending = adapter->tx_ring_count;
++ ring->rx_mini_pending = 0;
++ ring->rx_jumbo_pending = 0;
+ }
+
+ static int igb_set_ringparam(struct net_device *netdev,
+@@ -870,12 +862,12 @@
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+- new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
+- new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
++ new_rx_count = min_t(u16, ring->rx_pending, (u32)IGB_MAX_RXD);
++ new_rx_count = max_t(u16, new_rx_count, (u16)IGB_MIN_RXD);
+ new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+- new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
+- new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
++ new_tx_count = min_t(u16, ring->tx_pending, (u32)IGB_MAX_TXD);
++ new_tx_count = max_t(u16, new_tx_count, (u16)IGB_MIN_TXD);
+ new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring_count) &&
+@@ -898,11 +890,11 @@
+ }
+
+ if (adapter->num_tx_queues > adapter->num_rx_queues)
+- temp_ring = vmalloc(adapter->num_tx_queues *
+- sizeof(struct igb_ring));
++ temp_ring = vmalloc(adapter->num_tx_queues
++ * sizeof(struct igb_ring));
+ else
+- temp_ring = vmalloc(adapter->num_rx_queues *
+- sizeof(struct igb_ring));
++ temp_ring = vmalloc(adapter->num_rx_queues
++ * sizeof(struct igb_ring));
+
+ if (!temp_ring) {
+ err = -ENOMEM;
+@@ -911,9 +903,10 @@
+
+ igb_down(adapter);
+
+- /* We can't just free everything and then setup again,
++ /*
++ * We can't just free everything and then setup again,
+ * because the ISRs in MSI-X mode get passed pointers
+- * to the Tx and Rx ring structs.
++ * to the tx and rx ring structs.
+ */
+ if (new_tx_count != adapter->tx_ring_count) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+@@ -975,224 +968,6 @@
+ return err;
+ }
+
+-/* ethtool register test data */
+-struct igb_reg_test {
+- u16 reg;
+- u16 reg_offset;
+- u16 array_len;
+- u16 test_type;
+- u32 mask;
+- u32 write;
+-};
+-
+-/* In the hardware, registers are laid out either singly, in arrays
+- * spaced 0x100 bytes apart, or in contiguous tables. We assume
+- * most tests take place on arrays or single registers (handled
+- * as a single-element array) and special-case the tables.
+- * Table tests are always pattern tests.
+- *
+- * We also make provision for some required setup steps by specifying
+- * registers to be written without any read-back testing.
+- */
+-
+-#define PATTERN_TEST 1
+-#define SET_READ_TEST 2
+-#define WRITE_NO_TEST 3
+-#define TABLE32_TEST 4
+-#define TABLE64_TEST_LO 5
+-#define TABLE64_TEST_HI 6
+-
+-/* i210 reg test */
+-static struct igb_reg_test reg_test_i210[] = {
+- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+- /* RDH is read-only for i210, only test RDT. */
+- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+- { E1000_RA, 0, 16, TABLE64_TEST_LO,
+- 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RA, 0, 16, TABLE64_TEST_HI,
+- 0x900FFFFF, 0xFFFFFFFF },
+- { E1000_MTA, 0, 128, TABLE32_TEST,
+- 0xFFFFFFFF, 0xFFFFFFFF },
+- { 0, 0, 0, 0, 0 }
+-};
+-
+-/* i350 reg test */
+-static struct igb_reg_test reg_test_i350[] = {
+- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
+- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+- { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+- /* RDH is read-only for i350, only test RDT. */
+- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+- { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+- { E1000_RA, 0, 16, TABLE64_TEST_LO,
+- 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RA, 0, 16, TABLE64_TEST_HI,
+- 0xC3FFFFFF, 0xFFFFFFFF },
+- { E1000_RA2, 0, 16, TABLE64_TEST_LO,
+- 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RA2, 0, 16, TABLE64_TEST_HI,
+- 0xC3FFFFFF, 0xFFFFFFFF },
+- { E1000_MTA, 0, 128, TABLE32_TEST,
+- 0xFFFFFFFF, 0xFFFFFFFF },
+- { 0, 0, 0, 0 }
+-};
+-
+-/* 82580 reg test */
+-static struct igb_reg_test reg_test_82580[] = {
+- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+- { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+- /* RDH is read-only for 82580, only test RDT. */
+- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+- { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+- { E1000_RA, 0, 16, TABLE64_TEST_LO,
+- 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RA, 0, 16, TABLE64_TEST_HI,
+- 0x83FFFFFF, 0xFFFFFFFF },
+- { E1000_RA2, 0, 8, TABLE64_TEST_LO,
+- 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RA2, 0, 8, TABLE64_TEST_HI,
+- 0x83FFFFFF, 0xFFFFFFFF },
+- { E1000_MTA, 0, 128, TABLE32_TEST,
+- 0xFFFFFFFF, 0xFFFFFFFF },
+- { 0, 0, 0, 0 }
+-};
+-
+-/* 82576 reg test */
+-static struct igb_reg_test reg_test_82576[] = {
+- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+- { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+- /* Enable all RX queues before testing. */
+- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+- E1000_RXDCTL_QUEUE_ENABLE },
+- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
+- E1000_RXDCTL_QUEUE_ENABLE },
+- /* RDH is read-only for 82576, only test RDT. */
+- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
+- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
+- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+- { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+- { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
+- { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
+- { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { 0, 0, 0, 0 }
+-};
+-
+-/* 82575 register test */
+-static struct igb_reg_test reg_test_82575[] = {
+- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+- /* Enable all four RX queues before testing. */
+- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+- E1000_RXDCTL_QUEUE_ENABLE },
+- /* RDH is read-only for 82575, only test RDT. */
+- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
+- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
+- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
+- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+- { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
+- { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+- { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF },
+- { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+- { 0, 0, 0, 0 }
+-};
+-
+ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
+ int reg, u32 mask, u32 write)
+ {
+@@ -1201,13 +976,14 @@
+ static const u32 _test[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
+- wr32(reg, (_test[pat] & write));
+- val = rd32(reg) & mask;
++ E1000_WRITE_REG(hw, reg, (_test[pat] & write));
++ val = E1000_READ_REG(hw, reg) & mask;
+ if (val != (_test[pat] & write & mask)) {
+- dev_err(&adapter->pdev->dev,
++ dev_err(pci_dev_to_dev(adapter->pdev),
+ "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
+- reg, val, (_test[pat] & write & mask));
+- *data = reg;
++ E1000_REGISTER(hw, reg), val, (_test[pat]
++ & write & mask));
++ *data = E1000_REGISTER(hw, reg);
+ return true;
+ }
+ }
+@@ -1220,14 +996,13 @@
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 val;
+-
+- wr32(reg, write & mask);
+- val = rd32(reg);
++ E1000_WRITE_REG(hw, reg, write & mask);
++ val = E1000_READ_REG(hw, reg);
+ if ((write & mask) != (val & mask)) {
+- dev_err(&adapter->pdev->dev,
+- "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
++ dev_err(pci_dev_to_dev(adapter->pdev),
++ "set/check reg %04X test failed:got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), (write & mask));
+- *data = reg;
++ *data = E1000_REGISTER(hw, reg);
+ return true;
+ }
+
+@@ -1283,19 +1058,19 @@
+ * tests. Some bits are read-only, some toggle, and some
+ * are writable on newer MACs.
+ */
+- before = rd32(E1000_STATUS);
+- value = (rd32(E1000_STATUS) & toggle);
+- wr32(E1000_STATUS, toggle);
+- after = rd32(E1000_STATUS) & toggle;
++ before = E1000_READ_REG(hw, E1000_STATUS);
++ value = (E1000_READ_REG(hw, E1000_STATUS) & toggle);
++ E1000_WRITE_REG(hw, E1000_STATUS, toggle);
++ after = E1000_READ_REG(hw, E1000_STATUS) & toggle;
+ if (value != after) {
+- dev_err(&adapter->pdev->dev,
++ dev_err(pci_dev_to_dev(adapter->pdev),
+ "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+ after, value);
+ *data = 1;
+ return 1;
+ }
+ /* restore previous status */
+- wr32(E1000_STATUS, before);
++ E1000_WRITE_REG(hw, E1000_STATUS, before);
+
+ /* Perform the remainder of the register test, looping through
+ * the test table until we either fail or reach the null entry.
+@@ -1317,7 +1092,7 @@
+ break;
+ case WRITE_NO_TEST:
+ writel(test->write,
+- (adapter->hw.hw_addr + test->reg)
++ (adapter->hw.hw_addr + test->reg)
+ + (i * test->reg_offset));
+ break;
+ case TABLE32_TEST:
+@@ -1346,24 +1121,11 @@
+
+ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
+ {
+- struct e1000_hw *hw = &adapter->hw;
+-
+ *data = 0;
+
+- /* Validate eeprom on all parts but flashless */
+- switch (hw->mac.type) {
+- case e1000_i210:
+- case e1000_i211:
+- if (igb_get_flash_presence_i210(hw)) {
+- if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
+- *data = 2;
+- }
+- break;
+- default:
+- if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
+- *data = 2;
+- break;
+- }
++ /* Validate NVM checksum */
++ if (e1000_validate_nvm_checksum(&adapter->hw) < 0)
++ *data = 2;
+
+ return *data;
+ }
+@@ -1373,7 +1135,7 @@
+ struct igb_adapter *adapter = (struct igb_adapter *) data;
+ struct e1000_hw *hw = &adapter->hw;
+
+- adapter->test_icr |= rd32(E1000_ICR);
++ adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR);
+
+ return IRQ_HANDLED;
+ }
+@@ -1382,20 +1144,20 @@
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+- u32 mask, ics_mask, i = 0, shared_int = true;
++ u32 mask, ics_mask, i = 0, shared_int = TRUE;
+ u32 irq = adapter->pdev->irq;
+
+ *data = 0;
+
+ /* Hook up test interrupt handler just for this test */
+- if (adapter->flags & IGB_FLAG_HAS_MSIX) {
++ if (adapter->msix_entries) {
+ if (request_irq(adapter->msix_entries[0].vector,
+- igb_test_intr, 0, netdev->name, adapter)) {
++ &igb_test_intr, 0, netdev->name, adapter)) {
+ *data = 1;
+ return -1;
+ }
+ } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
+- shared_int = false;
++ shared_int = FALSE;
+ if (request_irq(irq,
+ igb_test_intr, 0, netdev->name, adapter)) {
+ *data = 1;
+@@ -1403,19 +1165,19 @@
+ }
+ } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
+ netdev->name, adapter)) {
+- shared_int = false;
+- } else if (request_irq(irq, igb_test_intr, IRQF_SHARED,
++ shared_int = FALSE;
++ } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
+ netdev->name, adapter)) {
+ *data = 1;
+ return -1;
+ }
+- dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
+- (shared_int ? "shared" : "unshared"));
++ dev_info(pci_dev_to_dev(adapter->pdev), "testing %s interrupt\n",
++ (shared_int ? "shared" : "unshared"));
+
+ /* Disable all the interrupts */
+- wr32(E1000_IMC, ~0);
+- wrfl();
+- usleep_range(10000, 11000);
++ E1000_WRITE_REG(hw, E1000_IMC, ~0);
++ E1000_WRITE_FLUSH(hw);
++ usleep_range(10000, 20000);
+
+ /* Define all writable bits for ICS */
+ switch (hw->mac.type) {
+@@ -1430,9 +1192,11 @@
+ break;
+ case e1000_i350:
+ case e1000_i354:
++ ics_mask = 0x77DCFED5;
++ break;
+ case e1000_i210:
+ case e1000_i211:
+- ics_mask = 0x77DCFED5;
++ ics_mask = 0x774CFED5;
+ break;
+ default:
+ ics_mask = 0x7FFFFFFF;
+@@ -1457,12 +1221,12 @@
+ adapter->test_icr = 0;
+
+ /* Flush any pending interrupts */
+- wr32(E1000_ICR, ~0);
++ E1000_WRITE_REG(hw, E1000_ICR, ~0);
+
+- wr32(E1000_IMC, mask);
+- wr32(E1000_ICS, mask);
+- wrfl();
+- usleep_range(10000, 11000);
++ E1000_WRITE_REG(hw, E1000_IMC, mask);
++ E1000_WRITE_REG(hw, E1000_ICS, mask);
++ E1000_WRITE_FLUSH(hw);
++ usleep_range(10000, 20000);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+@@ -1479,12 +1243,12 @@
+ adapter->test_icr = 0;
+
+ /* Flush any pending interrupts */
+- wr32(E1000_ICR, ~0);
++ E1000_WRITE_REG(hw, E1000_ICR, ~0);
+
+- wr32(E1000_IMS, mask);
+- wr32(E1000_ICS, mask);
+- wrfl();
+- usleep_range(10000, 11000);
++ E1000_WRITE_REG(hw, E1000_IMS, mask);
++ E1000_WRITE_REG(hw, E1000_ICS, mask);
++ E1000_WRITE_FLUSH(hw);
++ usleep_range(10000, 20000);
+
+ if (!(adapter->test_icr & mask)) {
+ *data = 4;
+@@ -1501,12 +1265,12 @@
+ adapter->test_icr = 0;
+
+ /* Flush any pending interrupts */
+- wr32(E1000_ICR, ~0);
++ E1000_WRITE_REG(hw, E1000_ICR, ~0);
+
+- wr32(E1000_IMC, ~mask);
+- wr32(E1000_ICS, ~mask);
+- wrfl();
+- usleep_range(10000, 11000);
++ E1000_WRITE_REG(hw, E1000_IMC, ~mask);
++ E1000_WRITE_REG(hw, E1000_ICS, ~mask);
++ E1000_WRITE_FLUSH(hw);
++ usleep_range(10000, 20000);
+
+ if (adapter->test_icr & mask) {
+ *data = 5;
+@@ -1516,12 +1280,12 @@
+ }
+
+ /* Disable all the interrupts */
+- wr32(E1000_IMC, ~0);
+- wrfl();
+- usleep_range(10000, 11000);
++ E1000_WRITE_REG(hw, E1000_IMC, ~0);
++ E1000_WRITE_FLUSH(hw);
++ usleep_range(10000, 20000);
+
+ /* Unhook test interrupt handler */
+- if (adapter->flags & IGB_FLAG_HAS_MSIX)
++ if (adapter->msix_entries)
+ free_irq(adapter->msix_entries[0].vector, adapter);
+ else
+ free_irq(irq, adapter);
+@@ -1544,7 +1308,7 @@
+
+ /* Setup Tx descriptor ring and Tx buffers */
+ tx_ring->count = IGB_DEFAULT_TXD;
+- tx_ring->dev = &adapter->pdev->dev;
++ tx_ring->dev = pci_dev_to_dev(adapter->pdev);
+ tx_ring->netdev = adapter->netdev;
+ tx_ring->reg_idx = adapter->vfs_allocated_count;
+
+@@ -1558,17 +1322,20 @@
+
+ /* Setup Rx descriptor ring and Rx buffers */
+ rx_ring->count = IGB_DEFAULT_RXD;
+- rx_ring->dev = &adapter->pdev->dev;
++ rx_ring->dev = pci_dev_to_dev(adapter->pdev);
+ rx_ring->netdev = adapter->netdev;
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ rx_ring->rx_buffer_len = IGB_RX_HDR_LEN;
++#endif
+ rx_ring->reg_idx = adapter->vfs_allocated_count;
+
+ if (igb_setup_rx_resources(rx_ring)) {
+- ret_val = 3;
++ ret_val = 2;
+ goto err_nomem;
+ }
+
+ /* set the default queue to queue 0 of PF */
+- wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);
++ E1000_WRITE_REG(hw, E1000_MRQC, adapter->vfs_allocated_count << 3);
+
+ /* enable receive ring */
+ igb_setup_rctl(adapter);
+@@ -1588,10 +1355,10 @@
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
+- igb_write_phy_reg(hw, 29, 0x001F);
+- igb_write_phy_reg(hw, 30, 0x8FFC);
+- igb_write_phy_reg(hw, 29, 0x001A);
+- igb_write_phy_reg(hw, 30, 0x8FF0);
++ igb_e1000_write_phy_reg(hw, 29, 0x001F);
++ igb_e1000_write_phy_reg(hw, 30, 0x8FFC);
++ igb_e1000_write_phy_reg(hw, 29, 0x001A);
++ igb_e1000_write_phy_reg(hw, 30, 0x8FF0);
+ }
+
+ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
+@@ -1599,34 +1366,32 @@
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_reg = 0;
+
+- hw->mac.autoneg = false;
++ hw->mac.autoneg = FALSE;
+
+ if (hw->phy.type == e1000_phy_m88) {
+ if (hw->phy.id != I210_I_PHY_ID) {
+ /* Auto-MDI/MDIX Off */
+- igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
++ igb_e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+ /* reset to update Auto-MDI/MDIX */
+- igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140);
+ /* autoneg off */
+- igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140);
+ } else {
+ /* force 1000, set loopback */
+- igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
+- igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
++ igb_e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
+ }
+- } else if (hw->phy.type == e1000_phy_82580) {
++ } else {
+ /* enable MII loopback */
+- igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
++ if (hw->phy.type == e1000_phy_82580)
++ igb_e1000_write_phy_reg(hw, I82577_PHY_LBK_CTRL, 0x8041);
+ }
+
+- /* add small delay to avoid loopback test failure */
+- msleep(50);
+-
+- /* force 1000, set loopback */
+- igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
++ /* force 1000, set loopback */
++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+- ctrl_reg = rd32(E1000_CTRL);
++ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+@@ -1637,7 +1402,7 @@
+ if (hw->phy.type == e1000_phy_m88)
+ ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+
+- wr32(E1000_CTRL, ctrl_reg);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
+
+ /* Disable the receiver on the PHY so when a cable is plugged in, the
+ * PHY does not begin to autoneg when a cable is reconnected to the NIC.
+@@ -1659,64 +1424,64 @@
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg;
+
+- reg = rd32(E1000_CTRL_EXT);
++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+ /* use CTRL_EXT to identify link type as SGMII can appear as copper */
+ if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
+- (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+- (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+- (hw->device_id == E1000_DEV_ID_I354_SGMII) ||
+- (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) {
++ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
++ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
++ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
++ (hw->device_id == E1000_DEV_ID_I354_SGMII) ||
++ (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) {
+ /* Enable DH89xxCC MPHY for near end loopback */
+- reg = rd32(E1000_MPHY_ADDR_CTL);
++ reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
+ reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
+- E1000_MPHY_PCS_CLK_REG_OFFSET;
+- wr32(E1000_MPHY_ADDR_CTL, reg);
++ E1000_MPHY_PCS_CLK_REG_OFFSET;
++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);
+
+- reg = rd32(E1000_MPHY_DATA);
++ reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
+ reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
+- wr32(E1000_MPHY_DATA, reg);
++ E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
+ }
+
+- reg = rd32(E1000_RCTL);
++ reg = E1000_READ_REG(hw, E1000_RCTL);
+ reg |= E1000_RCTL_LBM_TCVR;
+- wr32(E1000_RCTL, reg);
++ E1000_WRITE_REG(hw, E1000_RCTL, reg);
+
+- wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
++ E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
+
+- reg = rd32(E1000_CTRL);
++ reg = E1000_READ_REG(hw, E1000_CTRL);
+ reg &= ~(E1000_CTRL_RFCE |
+ E1000_CTRL_TFCE |
+ E1000_CTRL_LRST);
+ reg |= E1000_CTRL_SLU |
+ E1000_CTRL_FD;
+- wr32(E1000_CTRL, reg);
++ E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+ /* Unset switch control to serdes energy detect */
+- reg = rd32(E1000_CONNSW);
++ reg = E1000_READ_REG(hw, E1000_CONNSW);
+ reg &= ~E1000_CONNSW_ENRGSRC;
+- wr32(E1000_CONNSW, reg);
++ E1000_WRITE_REG(hw, E1000_CONNSW, reg);
+
+ /* Unset sigdetect for SERDES loopback on
+- * 82580 and newer devices.
++ * 82580 and newer devices
+ */
+ if (hw->mac.type >= e1000_82580) {
+- reg = rd32(E1000_PCS_CFG0);
++ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_IGN_SD;
+- wr32(E1000_PCS_CFG0, reg);
++ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+ }
+
+ /* Set PCS register for forced speed */
+- reg = rd32(E1000_PCS_LCTL);
++ reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+ reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
+ reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
+ E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
+ E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
+ E1000_PCS_LCTL_FSD | /* Force Speed */
+ E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
+- wr32(E1000_PCS_LCTL, reg);
++ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
+
+ return 0;
+ }
+@@ -1731,36 +1496,37 @@
+ u16 phy_reg;
+
+ if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
+- (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+- (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+- (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
++ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
++ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
++ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
++ (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
+ u32 reg;
+
+ /* Disable near end loopback on DH89xxCC */
+- reg = rd32(E1000_MPHY_ADDR_CTL);
++ reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
+ reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
+- E1000_MPHY_PCS_CLK_REG_OFFSET;
+- wr32(E1000_MPHY_ADDR_CTL, reg);
++ E1000_MPHY_PCS_CLK_REG_OFFSET;
++ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);
+
+- reg = rd32(E1000_MPHY_DATA);
++ reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
+ reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
+- wr32(E1000_MPHY_DATA, reg);
++ E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
+ }
+
+- rctl = rd32(E1000_RCTL);
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+- wr32(E1000_RCTL, rctl);
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+- hw->mac.autoneg = true;
+- igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
++ hw->mac.autoneg = TRUE;
++ igb_e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
+ if (phy_reg & MII_CR_LOOPBACK) {
+ phy_reg &= ~MII_CR_LOOPBACK;
+- igb_write_phy_reg(hw, PHY_CONTROL, phy_reg);
+- igb_phy_sw_reset(hw);
++ if (hw->phy.type == I210_I_PHY_ID)
++ igb_e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
++ igb_e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
++ e1000_phy_commit(hw);
+ }
+ }
+-
+ static void igb_create_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+ {
+@@ -1779,19 +1545,25 @@
+
+ frame_size >>= 1;
+
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ data = rx_buffer->skb->data;
++#else
+ data = kmap(rx_buffer->page);
++#endif
+
+ if (data[3] != 0xFF ||
+ data[frame_size + 10] != 0xBE ||
+ data[frame_size + 12] != 0xAF)
+ match = false;
+
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ kunmap(rx_buffer->page);
+
++#endif
+ return match;
+ }
+
+-static int igb_clean_test_rings(struct igb_ring *rx_ring,
++static u16 igb_clean_test_rings(struct igb_ring *rx_ring,
+ struct igb_ring *tx_ring,
+ unsigned int size)
+ {
+@@ -1806,13 +1578,17 @@
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
+
+ while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
+- /* check Rx buffer */
++ /* check rx buffer */
+ rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
+
+ /* sync Rx buffer for CPU read */
+ dma_sync_single_for_cpu(rx_ring->dev,
+ rx_buffer_info->dma,
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ IGB_RX_HDR_LEN,
++#else
+ IGB_RX_BUFSZ,
++#endif
+ DMA_FROM_DEVICE);
+
+ /* verify contents of skb */
+@@ -1822,14 +1598,18 @@
+ /* sync Rx buffer for device write */
+ dma_sync_single_for_device(rx_ring->dev,
+ rx_buffer_info->dma,
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ IGB_RX_HDR_LEN,
++#else
+ IGB_RX_BUFSZ,
++#endif
+ DMA_FROM_DEVICE);
+
+- /* unmap buffer on Tx side */
++ /* unmap buffer on tx side */
+ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
+ igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+
+- /* increment Rx/Tx next to clean counters */
++ /* increment rx/tx next to clean counters */
+ rx_ntc++;
+ if (rx_ntc == rx_ring->count)
+ rx_ntc = 0;
+@@ -1841,8 +1621,6 @@
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
+ }
+
+- netdev_tx_reset_queue(txring_txq(tx_ring));
+-
+ /* re-map buffers to ring, store next to clean values */
+ igb_alloc_rx_buffers(rx_ring, count);
+ rx_ring->next_to_clean = rx_ntc;
+@@ -1870,7 +1648,8 @@
+ igb_create_lbtest_frame(skb, size);
+ skb_put(skb, size);
+
+- /* Calculate the loop count based on the largest descriptor ring
++ /*
++ * Calculate the loop count based on the largest descriptor ring
+ * The idea is to wrap the largest ring a number of times using 64
+ * send/receive pairs during each loop
+ */
+@@ -1897,7 +1676,7 @@
+ break;
+ }
+
+- /* allow 200 milliseconds for packets to go from Tx to Rx */
++ /* allow 200 milliseconds for packets to go from tx to rx */
+ msleep(200);
+
+ good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
+@@ -1916,21 +1695,14 @@
+ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
+ {
+ /* PHY loopback cannot be performed if SoL/IDER
+- * sessions are active
+- */
+- if (igb_check_reset_block(&adapter->hw)) {
+- dev_err(&adapter->pdev->dev,
++ * sessions are active */
++ if (e1000_check_reset_block(&adapter->hw)) {
++ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Cannot do PHY loopback test when SoL/IDER is active.\n");
+ *data = 0;
+ goto out;
+ }
+
+- if (adapter->hw.mac.type == e1000_i354) {
+- dev_info(&adapter->pdev->dev,
+- "Loopback test not supported on i354.\n");
+- *data = 0;
+- goto out;
+- }
+ *data = igb_setup_desc_rings(adapter);
+ if (*data)
+ goto out;
+@@ -1938,6 +1710,7 @@
+ if (*data)
+ goto err_loopback;
+ *data = igb_run_loopback_test(adapter);
++
+ igb_loopback_cleanup(adapter);
+
+ err_loopback:
+@@ -1948,32 +1721,39 @@
+
+ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
+ {
+- struct e1000_hw *hw = &adapter->hw;
++ u32 link;
++ int i, time;
++
+ *data = 0;
+- if (hw->phy.media_type == e1000_media_type_internal_serdes) {
++ time = 0;
++ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
+ int i = 0;
+-
+- hw->mac.serdes_has_link = false;
++ adapter->hw.mac.serdes_has_link = FALSE;
+
+ /* On some blade server designs, link establishment
+- * could take as long as 2-3 minutes
+- */
++ * could take as long as 2-3 minutes */
+ do {
+- hw->mac.ops.check_for_link(&adapter->hw);
+- if (hw->mac.serdes_has_link)
+- return *data;
++ igb_e1000_check_for_link(&adapter->hw);
++ if (adapter->hw.mac.serdes_has_link)
++ goto out;
+ msleep(20);
+ } while (i++ < 3750);
+
+ *data = 1;
+ } else {
+- hw->mac.ops.check_for_link(&adapter->hw);
+- if (hw->mac.autoneg)
+- msleep(5000);
+-
+- if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
++ for (i = 0; i < IGB_MAX_LINK_TRIES; i++) {
++ link = igb_has_link(adapter);
++ if (link) {
++ goto out;
++ } else {
++ time++;
++ msleep(1000);
++ }
++ }
++ if (!link)
+ *data = 1;
+ }
++out:
+ return *data;
+ }
+
+@@ -1986,10 +1766,6 @@
+ bool if_running = netif_running(netdev);
+
+ set_bit(__IGB_TESTING, &adapter->state);
+-
+- /* can't do offline tests on media switching devices */
+- if (adapter->hw.dev_spec._82575.mas_capable)
+- eth_test->flags &= ~ETH_TEST_FL_OFFLINE;
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+@@ -1998,20 +1774,19 @@
+ forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
+ autoneg = adapter->hw.mac.autoneg;
+
+- dev_info(&adapter->pdev->dev, "offline testing starting\n");
++ dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n");
+
+ /* power up link for link test */
+ igb_power_up_link(adapter);
+
+ /* Link test performed before hardware reset so autoneg doesn't
+- * interfere with test result
+- */
++ * interfere with test result */
+ if (igb_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+- dev_close(netdev);
++ igb_close(netdev);
+ else
+ igb_reset(adapter);
+
+@@ -2027,8 +1802,10 @@
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ igb_reset(adapter);
++
+ /* power up link for loopback test */
+ igb_power_up_link(adapter);
++
+ if (igb_loopback_test(adapter, &data[3]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+@@ -2038,15 +1815,15 @@
+ adapter->hw.mac.autoneg = autoneg;
+
+ /* force this routine to wait until autoneg complete/timeout */
+- adapter->hw.phy.autoneg_wait_to_complete = true;
++ adapter->hw.phy.autoneg_wait_to_complete = TRUE;
+ igb_reset(adapter);
+- adapter->hw.phy.autoneg_wait_to_complete = false;
++ adapter->hw.phy.autoneg_wait_to_complete = FALSE;
+
+ clear_bit(__IGB_TESTING, &adapter->state);
+ if (if_running)
+- dev_open(netdev);
++ igb_open(netdev);
+ } else {
+- dev_info(&adapter->pdev->dev, "online testing starting\n");
++ dev_info(pci_dev_to_dev(adapter->pdev), "online testing starting\n");
+
+ /* PHY is powered down when interface is down */
+ if (if_running && igb_link_test(adapter, &data[4]))
+@@ -2125,8 +1902,7 @@
+ }
+
+ /* bit defines for adapter->led_status */
+-#define IGB_LED_ON 0
+-
++#ifdef HAVE_ETHTOOL_SET_PHYS_ID
+ static int igb_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+ {
+@@ -2135,23 +1911,47 @@
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+- igb_blink_led(hw);
++ e1000_blink_led(hw);
+ return 2;
+ case ETHTOOL_ID_ON:
+- igb_blink_led(hw);
++ igb_e1000_led_on(hw);
+ break;
+ case ETHTOOL_ID_OFF:
+- igb_led_off(hw);
++ igb_e1000_led_off(hw);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+- igb_led_off(hw);
+- clear_bit(IGB_LED_ON, &adapter->led_status);
+- igb_cleanup_led(hw);
++ igb_e1000_led_off(hw);
++ igb_e1000_cleanup_led(hw);
+ break;
+ }
+
+ return 0;
+ }
++#else
++static int igb_phys_id(struct net_device *netdev, u32 data)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ unsigned long timeout;
++
++ timeout = data * 1000;
++
++ /*
++ * msleep_interruptable only accepts unsigned int so we are limited
++ * in how long a duration we can wait
++ */
++ if (!timeout || timeout > UINT_MAX)
++ timeout = UINT_MAX;
++
++ e1000_blink_led(hw);
++ msleep_interruptible(timeout);
++
++ igb_e1000_led_off(hw);
++ igb_e1000_cleanup_led(hw);
++
++ return 0;
++}
++#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
+
+ static int igb_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+@@ -2159,11 +1959,36 @@
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int i;
+
++ if (ec->rx_max_coalesced_frames ||
++ ec->rx_coalesce_usecs_irq ||
++ ec->rx_max_coalesced_frames_irq ||
++ ec->tx_max_coalesced_frames ||
++ ec->tx_coalesce_usecs_irq ||
++ ec->stats_block_coalesce_usecs ||
++ ec->use_adaptive_rx_coalesce ||
++ ec->use_adaptive_tx_coalesce ||
++ ec->pkt_rate_low ||
++ ec->rx_coalesce_usecs_low ||
++ ec->rx_max_coalesced_frames_low ||
++ ec->tx_coalesce_usecs_low ||
++ ec->tx_max_coalesced_frames_low ||
++ ec->pkt_rate_high ||
++ ec->rx_coalesce_usecs_high ||
++ ec->rx_max_coalesced_frames_high ||
++ ec->tx_coalesce_usecs_high ||
++ ec->tx_max_coalesced_frames_high ||
++ ec->rate_sample_interval) {
++ netdev_err(netdev, "set_coalesce: invalid parameter");
++ return -ENOTSUPP;
++ }
++
+ if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
+ ((ec->rx_coalesce_usecs > 3) &&
+ (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
+- (ec->rx_coalesce_usecs == 2))
++ (ec->rx_coalesce_usecs == 2)) {
++ netdev_err(netdev, "set_coalesce: invalid setting");
+ return -EINVAL;
++ }
+
+ if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
+ ((ec->tx_coalesce_usecs > 3) &&
+@@ -2174,11 +1999,12 @@
+ if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
+ return -EINVAL;
+
++ if (ec->tx_max_coalesced_frames_irq)
++ adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
++
+ /* If ITR is disabled, disable DMAC */
+- if (ec->rx_coalesce_usecs == 0) {
+- if (adapter->flags & IGB_FLAG_DMAC)
+- adapter->flags &= ~IGB_FLAG_DMAC;
+- }
++ if (ec->rx_coalesce_usecs == 0)
++ adapter->dmac = IGB_DMAC_DISABLE;
+
+ /* convert to rate of irq's per second */
+ if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
+@@ -2219,6 +2045,8 @@
+ else
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
++ ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
++
+ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
+ if (adapter->tx_itr_setting <= 3)
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+@@ -2237,6 +2065,7 @@
+ return 0;
+ }
+
++#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+ static int igb_get_sset_count(struct net_device *netdev, int sset)
+ {
+ switch (sset) {
+@@ -2248,19 +2077,32 @@
+ return -ENOTSUPP;
+ }
+ }
++#else
++static int igb_get_stats_count(struct net_device *netdev)
++{
++ return IGB_STATS_LEN;
++}
++
++static int igb_diag_test_count(struct net_device *netdev)
++{
++ return IGB_TEST_LEN;
++}
++#endif
+
+ static void igb_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+- struct rtnl_link_stats64 *net_stats = &adapter->stats64;
+- unsigned int start;
+- struct igb_ring *ring;
+- int i, j;
++#ifdef HAVE_NETDEV_STATS_IN_NETDEV
++ struct net_device_stats *net_stats = &netdev->stats;
++#else
++ struct net_device_stats *net_stats = &adapter->net_stats;
++#endif
++ u64 *queue_stat;
++ int i, j, k;
+ char *p;
+
+- spin_lock(&adapter->stats64_lock);
+- igb_update_stats(adapter, net_stats);
++ igb_update_stats(adapter);
+
+ for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
+ p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
+@@ -2273,36 +2115,15 @@
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+- u64 restart2;
+-
+- ring = adapter->tx_ring[j];
+- do {
+- start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+- data[i] = ring->tx_stats.packets;
+- data[i+1] = ring->tx_stats.bytes;
+- data[i+2] = ring->tx_stats.restart_queue;
+- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+- do {
+- start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
+- restart2 = ring->tx_stats.restart_queue2;
+- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
+- data[i+2] += restart2;
+-
+- i += IGB_TX_QUEUE_STATS_LEN;
++ queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
++ for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
++ data[i] = queue_stat[k];
+ }
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+- ring = adapter->rx_ring[j];
+- do {
+- start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+- data[i] = ring->rx_stats.packets;
+- data[i+1] = ring->rx_stats.bytes;
+- data[i+2] = ring->rx_stats.drops;
+- data[i+3] = ring->rx_stats.csum_err;
+- data[i+4] = ring->rx_stats.alloc_failed;
+- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+- i += IGB_RX_QUEUE_STATS_LEN;
++ queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
++ for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
++ data[i] = queue_stat[k];
+ }
+- spin_unlock(&adapter->stats64_lock);
+ }
+
+ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+@@ -2347,22 +2168,19 @@
+ sprintf(p, "rx_queue_%u_alloc_failed", i);
+ p += ETH_GSTRING_LEN;
+ }
+- /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
++/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ }
+ }
+
++#ifdef HAVE_ETHTOOL_GET_TS_INFO
+ static int igb_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+ {
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+- if (adapter->ptp_clock)
+- info->phc_index = ptp_clock_index(adapter->ptp_clock);
+- else
+- info->phc_index = -1;
+-
+ switch (adapter->hw.mac.type) {
++#ifdef HAVE_PTP_1588_CLOCK
+ case e1000_82575:
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+@@ -2383,6 +2201,11 @@
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
++ if (adapter->ptp_clock)
++ info->phc_index = ptp_clock_index(adapter->ptp_clock);
++ else
++ info->phc_index = -1;
++
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+@@ -2396,201 +2219,217 @@
+ info->rx_filters |=
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
++#endif /* HAVE_PTP_1588_CLOCK */
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
++#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+
+-static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
+- struct ethtool_rxnfc *cmd)
++#ifdef CONFIG_PM_RUNTIME
++static int igb_ethtool_begin(struct net_device *netdev)
+ {
+- cmd->data = 0;
++ struct igb_adapter *adapter = netdev_priv(netdev);
+
+- /* Report default options for RSS on igb */
+- switch (cmd->flow_type) {
+- case TCP_V4_FLOW:
+- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+- /* Fall through */
+- case UDP_V4_FLOW:
+- if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+- /* Fall through */
+- case SCTP_V4_FLOW:
+- case AH_ESP_V4_FLOW:
+- case AH_V4_FLOW:
+- case ESP_V4_FLOW:
+- case IPV4_FLOW:
+- cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+- break;
+- case TCP_V6_FLOW:
+- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+- /* Fall through */
+- case UDP_V6_FLOW:
+- if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+- /* Fall through */
+- case SCTP_V6_FLOW:
+- case AH_ESP_V6_FLOW:
+- case AH_V6_FLOW:
+- case ESP_V6_FLOW:
+- case IPV6_FLOW:
+- cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+- break;
+- default:
+- return -EINVAL;
+- }
++ pm_runtime_get_sync(&adapter->pdev->dev);
+
+ return 0;
+ }
+
+-static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+- u32 *rule_locs)
++static void igb_ethtool_complete(struct net_device *netdev)
+ {
+- struct igb_adapter *adapter = netdev_priv(dev);
+- int ret = -EOPNOTSUPP;
++ struct igb_adapter *adapter = netdev_priv(netdev);
+
+- switch (cmd->cmd) {
+- case ETHTOOL_GRXRINGS:
+- cmd->data = adapter->num_rx_queues;
+- ret = 0;
+- break;
+- case ETHTOOL_GRXFH:
+- ret = igb_get_rss_hash_opts(adapter, cmd);
+- break;
+- default:
+- break;
+- }
++ pm_runtime_put(&adapter->pdev->dev);
++}
++#endif /* CONFIG_PM_RUNTIME */
+
+- return ret;
++#ifndef HAVE_NDO_SET_FEATURES
++static u32 igb_get_rx_csum(struct net_device *netdev)
++{
++ return !!(netdev->features & NETIF_F_RXCSUM);
+ }
+
+-#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
+- IGB_FLAG_RSS_FIELD_IPV6_UDP)
+-static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
+- struct ethtool_rxnfc *nfc)
++static int igb_set_rx_csum(struct net_device *netdev, u32 data)
+ {
+- u32 flags = adapter->flags;
++ const u32 feature_list = NETIF_F_RXCSUM;
+
+- /* RSS does not support anything other than hashing
+- * to queues on src and dst IPs and ports
++ if (data)
++ netdev->features |= feature_list;
++ else
++ netdev->features &= ~feature_list;
++
++ return 0;
++}
++
++static int igb_set_tx_csum(struct net_device *netdev, u32 data)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++#ifdef NETIF_F_IPV6_CSUM
++ u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
++#else
++ u32 feature_list = NETIF_F_IP_CSUM;
++#endif
++
++ if (adapter->hw.mac.type >= e1000_82576)
++ feature_list |= NETIF_F_SCTP_CSUM;
++
++ if (data)
++ netdev->features |= feature_list;
++ else
++ netdev->features &= ~feature_list;
++
++ return 0;
++}
++
++#ifdef NETIF_F_TSO
++static int igb_set_tso(struct net_device *netdev, u32 data)
++{
++#ifdef NETIF_F_TSO6
++ const u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6;
++#else
++ const u32 feature_list = NETIF_F_TSO;
++#endif
++
++ if (data)
++ netdev->features |= feature_list;
++ else
++ netdev->features &= ~feature_list;
++
++#ifndef HAVE_NETDEV_VLAN_FEATURES
++ if (!data) {
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct net_device *v_netdev;
++ int i;
++
++ /* disable TSO on all VLANs if they're present */
++ if (!adapter->vlgrp)
++ goto tso_out;
++
++ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
++ v_netdev = vlan_group_get_device(adapter->vlgrp, i);
++ if (!v_netdev)
++ continue;
++
++ v_netdev->features &= ~feature_list;
++ vlan_group_set_device(adapter->vlgrp, i, v_netdev);
++ }
++ }
++
++tso_out:
++
++#endif /* HAVE_NETDEV_VLAN_FEATURES */
++ return 0;
++}
++
++#endif /* NETIF_F_TSO */
++#ifdef ETHTOOL_GFLAGS
++static int igb_set_flags(struct net_device *netdev, u32 data)
++{
++ u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
++ ETH_FLAG_RXHASH;
++#ifndef HAVE_VLAN_RX_REGISTER
++ u32 changed = netdev->features ^ data;
++#endif
++ int rc;
++#ifndef IGB_NO_LRO
++
++ supported_flags |= ETH_FLAG_LRO;
++#endif
++ /*
++ * Since there is no support for separate tx vlan accel
++ * enabled make sure tx flag is cleared if rx is.
+ */
+- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+- RXH_L4_B_0_1 | RXH_L4_B_2_3))
+- return -EINVAL;
++ if (!(data & ETH_FLAG_RXVLAN))
++ data &= ~ETH_FLAG_TXVLAN;
+
+- switch (nfc->flow_type) {
+- case TCP_V4_FLOW:
+- case TCP_V6_FLOW:
+- if (!(nfc->data & RXH_IP_SRC) ||
+- !(nfc->data & RXH_IP_DST) ||
+- !(nfc->data & RXH_L4_B_0_1) ||
+- !(nfc->data & RXH_L4_B_2_3))
+- return -EINVAL;
+- break;
+- case UDP_V4_FLOW:
+- if (!(nfc->data & RXH_IP_SRC) ||
+- !(nfc->data & RXH_IP_DST))
+- return -EINVAL;
+- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+- case 0:
+- flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
+- break;
+- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+- flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
+- break;
+- default:
+- return -EINVAL;
+- }
+- break;
+- case UDP_V6_FLOW:
+- if (!(nfc->data & RXH_IP_SRC) ||
+- !(nfc->data & RXH_IP_DST))
+- return -EINVAL;
+- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+- case 0:
+- flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
+- break;
+- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+- flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
+- break;
+- default:
+- return -EINVAL;
+- }
+- break;
+- case AH_ESP_V4_FLOW:
+- case AH_V4_FLOW:
+- case ESP_V4_FLOW:
+- case SCTP_V4_FLOW:
+- case AH_ESP_V6_FLOW:
+- case AH_V6_FLOW:
+- case ESP_V6_FLOW:
+- case SCTP_V6_FLOW:
+- if (!(nfc->data & RXH_IP_SRC) ||
+- !(nfc->data & RXH_IP_DST) ||
+- (nfc->data & RXH_L4_B_0_1) ||
+- (nfc->data & RXH_L4_B_2_3))
+- return -EINVAL;
+- break;
+- default:
+- return -EINVAL;
+- }
+-
+- /* if we changed something we need to update flags */
+- if (flags != adapter->flags) {
+- struct e1000_hw *hw = &adapter->hw;
+- u32 mrqc = rd32(E1000_MRQC);
+-
+- if ((flags & UDP_RSS_FLAGS) &&
+- !(adapter->flags & UDP_RSS_FLAGS))
+- dev_err(&adapter->pdev->dev,
+- "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+-
+- adapter->flags = flags;
+-
+- /* Perform hash on these packet types */
+- mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
+- E1000_MRQC_RSS_FIELD_IPV4_TCP |
+- E1000_MRQC_RSS_FIELD_IPV6 |
+- E1000_MRQC_RSS_FIELD_IPV6_TCP;
+-
+- mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
+- E1000_MRQC_RSS_FIELD_IPV6_UDP);
+-
+- if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+- mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+-
+- if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+- mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+-
+- wr32(E1000_MRQC, mrqc);
+- }
++ rc = ethtool_op_set_flags(netdev, data, supported_flags);
++ if (rc)
++ return rc;
++#ifndef HAVE_VLAN_RX_REGISTER
++
++ if (changed & ETH_FLAG_RXVLAN)
++ igb_vlan_mode(netdev, data);
++#endif
+
+ return 0;
+ }
+
+-static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
++#endif /* ETHTOOL_GFLAGS */
++#endif /* HAVE_NDO_SET_FEATURES */
++#ifdef ETHTOOL_SADV_COAL
++static int igb_set_adv_coal(struct net_device *netdev,
++ struct ethtool_value *edata)
+ {
+- struct igb_adapter *adapter = netdev_priv(dev);
+- int ret = -EOPNOTSUPP;
++ struct igb_adapter *adapter = netdev_priv(netdev);
+
+- switch (cmd->cmd) {
+- case ETHTOOL_SRXFH:
+- ret = igb_set_rss_hash_opt(adapter, cmd);
++ switch (edata->data) {
++ case IGB_DMAC_DISABLE:
++ adapter->dmac = edata->data;
+ break;
+- default:
++ case IGB_DMAC_MIN:
++ adapter->dmac = edata->data;
++ break;
++ case IGB_DMAC_500:
++ adapter->dmac = edata->data;
++ break;
++ case IGB_DMAC_EN_DEFAULT:
++ adapter->dmac = edata->data;
++ break;
++ case IGB_DMAC_2000:
++ adapter->dmac = edata->data;
++ break;
++ case IGB_DMAC_3000:
++ adapter->dmac = edata->data;
++ break;
++ case IGB_DMAC_4000:
++ adapter->dmac = edata->data;
++ break;
++ case IGB_DMAC_5000:
++ adapter->dmac = edata->data;
++ break;
++ case IGB_DMAC_6000:
++ adapter->dmac = edata->data;
++ break;
++ case IGB_DMAC_7000:
++ adapter->dmac = edata->data;
++ break;
++ case IGB_DMAC_8000:
++ adapter->dmac = edata->data;
++ break;
++ case IGB_DMAC_9000:
++ adapter->dmac = edata->data;
++ break;
++ case IGB_DMAC_MAX:
++ adapter->dmac = edata->data;
+ break;
++ default:
++ adapter->dmac = IGB_DMAC_DISABLE;
++ netdev_info(netdev,
++ "set_dmac: invalid setting, setting DMAC to %d\n",
++ adapter->dmac);
+ }
++ netdev_info(netdev, "%s: setting DMAC to %d\n",
++ netdev->name, adapter->dmac);
++ return 0;
++}
+
+- return ret;
++#endif /* ETHTOOL_SADV_COAL */
++#ifdef ETHTOOL_GADV_COAL
++static void igb_get_dmac(struct net_device *netdev,
++ struct ethtool_value *edata)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ edata->data = adapter->dmac;
++
++ return;
+ }
++#endif
+
++#ifdef ETHTOOL_GEEE
+ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+@@ -2604,17 +2443,18 @@
+
+ edata->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full);
++
+ if (!hw->dev_spec._82575.eee_disable)
+ edata->advertised =
+ mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+
+ /* The IPCNFG and EEER registers are not supported on I354. */
+ if (hw->mac.type == e1000_i354) {
+- igb_get_eee_status_i354(hw, (bool *)&edata->eee_active);
++ e1000_get_eee_status_i354(hw, (bool *)&edata->eee_active);
+ } else {
+ u32 eeer;
+
+- eeer = rd32(E1000_EEER);
++ eeer = E1000_READ_REG(hw, E1000_EEER);
+
+ /* EEE status on negotiated link */
+ if (eeer & E1000_EEER_EEE_NEG)
+@@ -2627,19 +2467,20 @@
+ /* EEE Link Partner Advertised */
+ switch (hw->mac.type) {
+ case e1000_i350:
+- ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
+- &phy_data);
++ ret_val = e1000_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
++ &phy_data);
+ if (ret_val)
+ return -ENODATA;
+
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
++
+ break;
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
+- E1000_EEE_LP_ADV_DEV_I210,
+- &phy_data);
++ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
++ E1000_EEE_LP_ADV_DEV_I210,
++ &phy_data);
+ if (ret_val)
+ return -ENODATA;
+
+@@ -2656,7 +2497,8 @@
+ (edata->eee_enabled))
+ edata->tx_lpi_enabled = true;
+
+- /* Report correct negotiated EEE status for devices that
++ /*
++ * report correct negotiated EEE status for devices that
+ * wrongly report EEE at half-duplex
+ */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+@@ -2668,60 +2510,59 @@
+
+ return 0;
+ }
++#endif
+
++#ifdef ETHTOOL_SEEE
+ static int igb_set_eee(struct net_device *netdev,
+ struct ethtool_eee *edata)
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_curr;
++ bool adv1g_eee = true, adv100m_eee = true;
+ s32 ret_val;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+- memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+-
+ ret_val = igb_get_eee(netdev, &eee_curr);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_curr.eee_enabled) {
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+- dev_err(&adapter->pdev->dev,
++ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+- /* Tx LPI timer is not implemented currently */
++ /* Tx LPI time is not implemented currently */
+ if (edata->tx_lpi_timer) {
+- dev_err(&adapter->pdev->dev,
++ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+- if (edata->advertised &
+- ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+- dev_err(&adapter->pdev->dev,
+- "EEE Advertisement supports only 100Tx and or 100T full duplex\n");
++ if (!edata->advertised || (edata->advertised &
++ ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) {
++ dev_err(pci_dev_to_dev(adapter->pdev),
++ "EEE Advertisement supports 100Base-Tx Full Duplex(0x08) 1000Base-T Full Duplex(0x20) or both(0x28)\n");
+ return -EINVAL;
+ }
++ adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL);
++ adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL);
+
+ } else if (!edata->eee_enabled) {
+- dev_err(&adapter->pdev->dev,
+- "Setting EEE options are not supported with EEE disabled\n");
++ dev_err(pci_dev_to_dev(adapter->pdev),
++ "Setting EEE options is not supported with EEE disabled\n");
+ return -EINVAL;
+ }
+
+ adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
++
+ if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
+ hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
+- adapter->flags |= IGB_FLAG_EEE;
+- if (hw->mac.type == e1000_i350)
+- igb_set_eee_i350(hw);
+- else
+- igb_set_eee_i354(hw);
+
+ /* reset link */
+ if (netif_running(netdev))
+@@ -2730,109 +2571,232 @@
+ igb_reset(adapter);
+ }
+
++ if (hw->mac.type == e1000_i354)
++ ret_val = e1000_set_eee_i354(hw, adv1g_eee, adv100m_eee);
++ else
++ ret_val = e1000_set_eee_i350(hw, adv1g_eee, adv100m_eee);
++
++ if (ret_val) {
++ dev_err(pci_dev_to_dev(adapter->pdev),
++ "Problem setting EEE advertisement options\n");
++ return -EINVAL;
++ }
++
+ return 0;
+ }
++#endif /* ETHTOOL_SEEE */
++#ifdef ETHTOOL_GRXFH
++#ifdef ETHTOOL_GRXFHINDIR
+
+-static int igb_get_module_info(struct net_device *netdev,
+- struct ethtool_modinfo *modinfo)
++static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
++ struct ethtool_rxnfc *cmd)
+ {
+- struct igb_adapter *adapter = netdev_priv(netdev);
+- struct e1000_hw *hw = &adapter->hw;
+- u32 status = 0;
+- u16 sff8472_rev, addr_mode;
+- bool page_swap = false;
+-
+- if ((hw->phy.media_type == e1000_media_type_copper) ||
+- (hw->phy.media_type == e1000_media_type_unknown))
+- return -EOPNOTSUPP;
++ cmd->data = 0;
+
+- /* Check whether we support SFF-8472 or not */
+- status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
+- if (status)
+- return -EIO;
+-
+- /* addressing mode is not supported */
+- status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
+- if (status)
+- return -EIO;
+-
+- /* addressing mode is not supported */
+- if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
+- hw_dbg("Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
+- page_swap = true;
+- }
+-
+- if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
+- /* We have an SFP, but it does not support SFF-8472 */
+- modinfo->type = ETH_MODULE_SFF_8079;
+- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+- } else {
+- /* We have an SFP which supports a revision of SFF-8472 */
+- modinfo->type = ETH_MODULE_SFF_8472;
+- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
++ /* Report default options for RSS on igb */
++ switch (cmd->flow_type) {
++ case TCP_V4_FLOW:
++ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
++ /* Fall through */
++ case UDP_V4_FLOW:
++ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
++ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
++ /* Fall through */
++ case SCTP_V4_FLOW:
++ case AH_ESP_V4_FLOW:
++ case AH_V4_FLOW:
++ case ESP_V4_FLOW:
++ case IPV4_FLOW:
++ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
++ break;
++ case TCP_V6_FLOW:
++ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
++ /* Fall through */
++ case UDP_V6_FLOW:
++ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
++ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
++ /* Fall through */
++ case SCTP_V6_FLOW:
++ case AH_ESP_V6_FLOW:
++ case AH_V6_FLOW:
++ case ESP_V6_FLOW:
++ case IPV6_FLOW:
++ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
++ break;
++ default:
++ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+-static int igb_get_module_eeprom(struct net_device *netdev,
+- struct ethtool_eeprom *ee, u8 *data)
++#endif /* ETHTOOL_GRXFHINDIR */
++static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
++#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
++ void *rule_locs)
++#else
++ u32 *rule_locs)
++#endif
+ {
+- struct igb_adapter *adapter = netdev_priv(netdev);
+- struct e1000_hw *hw = &adapter->hw;
+- u32 status = 0;
+- u16 *dataword;
+- u16 first_word, last_word;
+- int i = 0;
++ struct igb_adapter *adapter = netdev_priv(dev);
++ int ret = -EOPNOTSUPP;
+
+- if (ee->len == 0)
+- return -EINVAL;
++ switch (cmd->cmd) {
++ case ETHTOOL_GRXRINGS:
++ cmd->data = adapter->num_rx_queues;
++ ret = 0;
++ break;
++#ifdef ETHTOOL_GRXFHINDIR
++ case ETHTOOL_GRXFHINDIR:
++ ret = igb_get_rss_hash_opts(adapter, cmd);
++ break;
++#endif /* ETHTOOL_GRXFHINDIR */
++ default:
++ break;
++ }
+
+- first_word = ee->offset >> 1;
+- last_word = (ee->offset + ee->len - 1) >> 1;
++ return ret;
++}
+
+- dataword = kmalloc(sizeof(u16) * (last_word - first_word + 1),
+- GFP_KERNEL);
+- if (!dataword)
+- return -ENOMEM;
++#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
++ IGB_FLAG_RSS_FIELD_IPV6_UDP)
++static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
++ struct ethtool_rxnfc *nfc)
++{
++ u32 flags = adapter->flags;
+
+- /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
+- for (i = 0; i < last_word - first_word + 1; i++) {
+- status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
+- if (status) {
+- /* Error occurred while reading module */
+- kfree(dataword);
+- return -EIO;
+- }
++ /*
++ * RSS does not support anything other than hashing
++ * to queues on src and dst IPs and ports
++ */
++ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
++ RXH_L4_B_0_1 | RXH_L4_B_2_3))
++ return -EINVAL;
+
+- be16_to_cpus(&dataword[i]);
++ switch (nfc->flow_type) {
++ case TCP_V4_FLOW:
++ case TCP_V6_FLOW:
++ if (!(nfc->data & RXH_IP_SRC) ||
++ !(nfc->data & RXH_IP_DST) ||
++ !(nfc->data & RXH_L4_B_0_1) ||
++ !(nfc->data & RXH_L4_B_2_3))
++ return -EINVAL;
++ break;
++ case UDP_V4_FLOW:
++ if (!(nfc->data & RXH_IP_SRC) ||
++ !(nfc->data & RXH_IP_DST))
++ return -EINVAL;
++ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
++ case 0:
++ flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
++ break;
++ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
++ flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
++ break;
++ default:
++ return -EINVAL;
++ }
++ break;
++ case UDP_V6_FLOW:
++ if (!(nfc->data & RXH_IP_SRC) ||
++ !(nfc->data & RXH_IP_DST))
++ return -EINVAL;
++ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
++ case 0:
++ flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
++ break;
++ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
++ flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
++ break;
++ default:
++ return -EINVAL;
++ }
++ break;
++ case AH_ESP_V4_FLOW:
++ case AH_V4_FLOW:
++ case ESP_V4_FLOW:
++ case SCTP_V4_FLOW:
++ case AH_ESP_V6_FLOW:
++ case AH_V6_FLOW:
++ case ESP_V6_FLOW:
++ case SCTP_V6_FLOW:
++ if (!(nfc->data & RXH_IP_SRC) ||
++ !(nfc->data & RXH_IP_DST) ||
++ (nfc->data & RXH_L4_B_0_1) ||
++ (nfc->data & RXH_L4_B_2_3))
++ return -EINVAL;
++ break;
++ default:
++ return -EINVAL;
+ }
+
+- memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len);
+- kfree(dataword);
++ /* if we changed something we need to update flags */
++ if (flags != adapter->flags) {
++ struct e1000_hw *hw = &adapter->hw;
++ u32 mrqc = E1000_READ_REG(hw, E1000_MRQC);
+
+- return 0;
+-}
++ if ((flags & UDP_RSS_FLAGS) &&
++ !(adapter->flags & UDP_RSS_FLAGS))
++ DPRINTK(DRV, WARNING,
++ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
++
++ adapter->flags = flags;
++
++ /* Perform hash on these packet types */
++ mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
++ E1000_MRQC_RSS_FIELD_IPV4_TCP |
++ E1000_MRQC_RSS_FIELD_IPV6 |
++ E1000_MRQC_RSS_FIELD_IPV6_TCP;
++
++ mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
++ E1000_MRQC_RSS_FIELD_IPV6_UDP);
++
++ if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
++ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
++
++ if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
++ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
++
++ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
++ }
+
+-static int igb_ethtool_begin(struct net_device *netdev)
+-{
+- struct igb_adapter *adapter = netdev_priv(netdev);
+- pm_runtime_get_sync(&adapter->pdev->dev);
+ return 0;
+ }
+
+-static void igb_ethtool_complete(struct net_device *netdev)
++static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+ {
+- struct igb_adapter *adapter = netdev_priv(netdev);
+- pm_runtime_put(&adapter->pdev->dev);
++ struct igb_adapter *adapter = netdev_priv(dev);
++ int ret = -EOPNOTSUPP;
++
++ switch (cmd->cmd) {
++ case ETHTOOL_SRXFH:
++ ret = igb_set_rss_hash_opt(adapter, cmd);
++ break;
++ default:
++ break;
++ }
++
++ return ret;
+ }
+
++#endif /* ETHTOOL_GRXFH */
++#ifdef ETHTOOL_GRXFHINDIR
++#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
+ static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
+ {
+ return IGB_RETA_SIZE;
+ }
+
++#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH))
++#ifdef HAVE_RXFH_HASHFUNC
++static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
++ u8 *hfunc)
++#else
+ static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
++#endif /* HAVE_RXFH_HASHFUNC */
++#else
++static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
++#endif /* HAVE_ETHTOOL_GSRSSH */
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int i;
+@@ -2843,6 +2807,22 @@
+ return 0;
+ }
+
++#else
++static int igb_get_rxfh_indir(struct net_device *netdev,
++ struct ethtool_rxfh_indir *indir)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ size_t copy_size =
++ min_t(size_t, indir->size, ARRAY_SIZE(adapter->rss_indir_tbl));
++
++ indir->size = ARRAY_SIZE(adapter->rss_indir_tbl);
++ memcpy(indir->ring_index, adapter->rss_indir_tbl,
++ copy_size * sizeof(indir->ring_index[0]));
++ return 0;
++}
++#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
++#endif /* ETHTOOL_GRXFHINDIR */
++#ifdef ETHTOOL_SRXFHINDIR
+ void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+@@ -2872,14 +2852,24 @@
+ val |= adapter->rss_indir_tbl[i + j];
+ }
+
+- wr32(reg, val << shift);
++ E1000_WRITE_REG(hw, reg, val << shift);
+ reg += 4;
+ i += 4;
+ }
+ }
+
++#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
++#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH))
++#ifdef HAVE_RXFH_HASHFUNC
+ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
+- const u8 *key)
++ const u8 *key, const u8 hfunc)
++#else
++static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
++ const u8 *key)
++#endif /* HAVE_RXFH_HASHFUNC */
++#else
++static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
++#endif /* HAVE_ETHTOOL_GSRSSH */
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+@@ -2911,135 +2901,314 @@
+
+ return 0;
+ }
++#else
++static int igb_set_rxfh_indir(struct net_device *netdev,
++ const struct ethtool_rxfh_indir *indir)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ size_t i;
++
++ if (indir->size != ARRAY_SIZE(adapter->rss_indir_tbl))
++ return -EINVAL;
++ for (i = 0; i < ARRAY_SIZE(adapter->rss_indir_tbl); i++)
++ if (indir->ring_index[i] >= adapter->rss_queues)
++ return -EINVAL;
+
+-static unsigned int igb_max_channels(struct igb_adapter *adapter)
++ memcpy(adapter->rss_indir_tbl, indir->ring_index,
++ sizeof(adapter->rss_indir_tbl));
++ igb_write_rss_indir_tbl(adapter);
++ return 0;
++}
++#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
++#endif /* ETHTOOL_SRXFHINDIR */
++#ifdef ETHTOOL_GCHANNELS
++
++static unsigned int igb_max_rss_queues(struct igb_adapter *adapter)
+ {
+- struct e1000_hw *hw = &adapter->hw;
+- unsigned int max_combined = 0;
++ unsigned int max_rss_queues;
+
+- switch (hw->mac.type) {
++ /* Determine the maximum number of RSS queues supported. */
++ switch (adapter->hw.mac.type) {
+ case e1000_i211:
+- max_combined = IGB_MAX_RX_QUEUES_I211;
++ max_rss_queues = IGB_MAX_RX_QUEUES_I211;
+ break;
+ case e1000_82575:
+ case e1000_i210:
+- max_combined = IGB_MAX_RX_QUEUES_82575;
++ max_rss_queues = IGB_MAX_RX_QUEUES_82575;
+ break;
+ case e1000_i350:
+- if (!!adapter->vfs_allocated_count) {
+- max_combined = 1;
++ /* I350 cannot do RSS and SR-IOV at the same time */
++ if (adapter->vfs_allocated_count) {
++ max_rss_queues = 1;
+ break;
+ }
+ /* fall through */
+ case e1000_82576:
+- if (!!adapter->vfs_allocated_count) {
+- max_combined = 2;
++ if (adapter->vfs_allocated_count) {
++ max_rss_queues = 2;
+ break;
+ }
+ /* fall through */
+ case e1000_82580:
+- case e1000_i354:
+ default:
+- max_combined = IGB_MAX_RX_QUEUES;
++ max_rss_queues = IGB_MAX_RX_QUEUES;
+ break;
+ }
+
+- return max_combined;
++ return max_rss_queues;
+ }
+
+-static void igb_get_channels(struct net_device *netdev,
++static void igb_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+ {
+- struct igb_adapter *adapter = netdev_priv(netdev);
++ struct igb_adapter *adapter = netdev_priv(dev);
+
+- /* Report maximum channels */
+- ch->max_combined = igb_max_channels(adapter);
++ /* report maximum channels */
++ ch->max_combined = igb_max_rss_queues(adapter);
++ ch->max_rx = ch->max_combined;
++ if (adapter->vfs_allocated_count)
++ ch->max_tx = 1;
++ else
++ ch->max_tx = ch->max_combined;
+
+- /* Report info for other vector */
+- if (adapter->flags & IGB_FLAG_HAS_MSIX) {
++ /* report info for other vector */
++ if (adapter->msix_entries) {
+ ch->max_other = NON_Q_VECTORS;
+ ch->other_count = NON_Q_VECTORS;
+ }
+
+- ch->combined_count = adapter->rss_queues;
++ /* record RSS/TSS queues */
++ if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) {
++ if (adapter->num_rx_queues > adapter->num_tx_queues) {
++ ch->combined_count = adapter->num_tx_queues;
++ ch->rx_count = adapter->num_rx_queues -
++ adapter->num_tx_queues;
++ } else if (adapter->num_rx_queues < adapter->num_tx_queues) {
++ ch->combined_count = adapter->num_rx_queues;
++ ch->tx_count = adapter->num_tx_queues -
++ adapter->num_rx_queues;
++ } else {
++ ch->combined_count = adapter->num_rx_queues;
++ }
++ } else {
++ ch->rx_count = adapter->num_rx_queues;
++ ch->tx_count = adapter->num_tx_queues;
++ }
+ }
++#endif /* ETHTOOL_GCHANNELS */
++#ifdef ETHTOOL_SCHANNELS
+
+-static int igb_set_channels(struct net_device *netdev,
+- struct ethtool_channels *ch)
++static int igb_set_channels(struct net_device *dev,
++ struct ethtool_channels *ch)
+ {
+- struct igb_adapter *adapter = netdev_priv(netdev);
+- unsigned int count = ch->combined_count;
+- unsigned int max_combined = 0;
++ struct igb_adapter *adapter = netdev_priv(dev);
++ unsigned int max_rss_queues;
+
+- /* Verify they are not requesting separate vectors */
+- if (!count || ch->rx_count || ch->tx_count)
++ /* we cannot support combined, Rx, and Tx vectors simultaneously */
++ if (ch->combined_count && ch->rx_count && ch->tx_count)
+ return -EINVAL;
+
+- /* Verify other_count is valid and has not been changed */
+- if (ch->other_count != NON_Q_VECTORS)
++ /* ignore other_count since it is not changeable */
++
++ /* verify we have at least one channel in each direction */
++ if (!ch->combined_count && (!ch->rx_count || !ch->tx_count))
+ return -EINVAL;
+
+- /* Verify the number of channels doesn't exceed hw limits */
+- max_combined = igb_max_channels(adapter);
+- if (count > max_combined)
++ /* verify number of Tx queues does not exceed 1 if SR-IOV is enabled */
++ if (adapter->vfs_allocated_count &&
++ ((ch->combined_count + ch->tx_count) > 1))
+ return -EINVAL;
+
+- if (count != adapter->rss_queues) {
+- adapter->rss_queues = count;
+- igb_set_flag_queue_pairs(adapter, max_combined);
++ /* verify the number of channels does not exceed hardware limits */
++ max_rss_queues = igb_max_rss_queues(adapter);
++ if (((ch->combined_count + ch->rx_count) > max_rss_queues) ||
++ ((ch->combined_count + ch->tx_count) > max_rss_queues))
++ return -EINVAL;
+
+- /* Hardware has to reinitialize queues and interrupts to
+- * match the new configuration.
++ /* Determine if we need to pair queues. */
++ switch (adapter->hw.mac.type) {
++ case e1000_82575:
++ case e1000_i211:
++ /* Device supports enough interrupts without queue pairing. */
++ break;
++ case e1000_i350:
++ /* The PF has 3 interrupts and 1 queue pair w/ SR-IOV */
++ if (adapter->vfs_allocated_count)
++ break;
++ case e1000_82576:
++ /*
++ * The PF has access to 6 interrupt vectors if the number of
++ * VFs is less than 7. If that is the case we don't have
++ * to pair up the queues.
+ */
+- return igb_reinit_queues(adapter);
++ if ((adapter->vfs_allocated_count > 0) &&
++ (adapter->vfs_allocated_count < 7))
++ break;
++ /* fall through */
++ case e1000_82580:
++ case e1000_i210:
++ default:
++ /* verify we can support as many queues as requested */
++ if ((ch->combined_count +
++ ch->rx_count + ch->tx_count) > MAX_Q_VECTORS)
++ return -EINVAL;
++ break;
+ }
+
+- return 0;
++ /* update configuration values */
++ adapter->rss_queues = ch->combined_count + ch->rx_count;
++ if (ch->rx_count == ch->tx_count || adapter->vfs_allocated_count)
++ adapter->tss_queues = 0;
++ else
++ adapter->tss_queues = ch->combined_count + ch->tx_count;
++
++ if (ch->combined_count)
++ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
++ else
++ adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
++
++ /* update queue configuration for adapter */
++ return igb_setup_queues(adapter);
+ }
+
++#endif /* ETHTOOL_SCHANNELS */
+ static const struct ethtool_ops igb_ethtool_ops = {
+- .get_settings = igb_get_settings,
+- .set_settings = igb_set_settings,
+- .get_drvinfo = igb_get_drvinfo,
+- .get_regs_len = igb_get_regs_len,
+- .get_regs = igb_get_regs,
+- .get_wol = igb_get_wol,
+- .set_wol = igb_set_wol,
+- .get_msglevel = igb_get_msglevel,
+- .set_msglevel = igb_set_msglevel,
+- .nway_reset = igb_nway_reset,
+- .get_link = igb_get_link,
+- .get_eeprom_len = igb_get_eeprom_len,
+- .get_eeprom = igb_get_eeprom,
+- .set_eeprom = igb_set_eeprom,
+- .get_ringparam = igb_get_ringparam,
+- .set_ringparam = igb_set_ringparam,
+- .get_pauseparam = igb_get_pauseparam,
+- .set_pauseparam = igb_set_pauseparam,
+- .self_test = igb_diag_test,
+- .get_strings = igb_get_strings,
+- .set_phys_id = igb_set_phys_id,
+- .get_sset_count = igb_get_sset_count,
+- .get_ethtool_stats = igb_get_ethtool_stats,
+- .get_coalesce = igb_get_coalesce,
+- .set_coalesce = igb_set_coalesce,
+- .get_ts_info = igb_get_ts_info,
+- .get_rxnfc = igb_get_rxnfc,
+- .set_rxnfc = igb_set_rxnfc,
++ .get_settings = igb_get_settings,
++ .set_settings = igb_set_settings,
++ .get_drvinfo = igb_get_drvinfo,
++ .get_regs_len = igb_get_regs_len,
++ .get_regs = igb_get_regs,
++ .get_wol = igb_get_wol,
++ .set_wol = igb_set_wol,
++ .get_msglevel = igb_get_msglevel,
++ .set_msglevel = igb_set_msglevel,
++ .nway_reset = igb_nway_reset,
++ .get_link = igb_get_link,
++ .get_eeprom_len = igb_get_eeprom_len,
++ .get_eeprom = igb_get_eeprom,
++ .set_eeprom = igb_set_eeprom,
++ .get_ringparam = igb_get_ringparam,
++ .set_ringparam = igb_set_ringparam,
++ .get_pauseparam = igb_get_pauseparam,
++ .set_pauseparam = igb_set_pauseparam,
++ .self_test = igb_diag_test,
++ .get_strings = igb_get_strings,
++#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
++#ifdef HAVE_ETHTOOL_SET_PHYS_ID
++ .set_phys_id = igb_set_phys_id,
++#else
++ .phys_id = igb_phys_id,
++#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
++#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
++ .get_sset_count = igb_get_sset_count,
++#else
++ .get_stats_count = igb_get_stats_count,
++ .self_test_count = igb_diag_test_count,
++#endif
++ .get_ethtool_stats = igb_get_ethtool_stats,
++#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
++ .get_perm_addr = ethtool_op_get_perm_addr,
++#endif
++ .get_coalesce = igb_get_coalesce,
++ .set_coalesce = igb_set_coalesce,
++#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
++#ifdef HAVE_ETHTOOL_GET_TS_INFO
++ .get_ts_info = igb_get_ts_info,
++#endif /* HAVE_ETHTOOL_GET_TS_INFO */
++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
++#ifdef CONFIG_PM_RUNTIME
++ .begin = igb_ethtool_begin,
++ .complete = igb_ethtool_complete,
++#endif /* CONFIG_PM_RUNTIME */
++#ifndef HAVE_NDO_SET_FEATURES
++ .get_rx_csum = igb_get_rx_csum,
++ .set_rx_csum = igb_set_rx_csum,
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = igb_set_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = ethtool_op_set_sg,
++#ifdef NETIF_F_TSO
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = igb_set_tso,
++#endif
++#ifdef ETHTOOL_GFLAGS
++ .get_flags = ethtool_op_get_flags,
++ .set_flags = igb_set_flags,
++#endif /* ETHTOOL_GFLAGS */
++#endif /* HAVE_NDO_SET_FEATURES */
++#ifdef ETHTOOL_GADV_COAL
++ .get_advcoal = igb_get_adv_coal,
++ .set_advcoal = igb_set_dmac_coal,
++#endif /* ETHTOOL_GADV_COAL */
++#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
++#ifdef ETHTOOL_GEEE
+ .get_eee = igb_get_eee,
++#endif
++#ifdef ETHTOOL_SEEE
+ .set_eee = igb_set_eee,
+- .get_module_info = igb_get_module_info,
+- .get_module_eeprom = igb_get_module_eeprom,
++#endif
++#ifdef ETHTOOL_GRXFHINDIR
++#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
+ .get_rxfh_indir_size = igb_get_rxfh_indir_size,
++#endif /* HAVE_ETHTOOL_GRSFHINDIR_SIZE */
++#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH))
+ .get_rxfh = igb_get_rxfh,
++#else
++ .get_rxfh_indir = igb_get_rxfh_indir,
++#endif /* HAVE_ETHTOOL_GSRSSH */
++#endif /* ETHTOOL_GRXFHINDIR */
++#ifdef ETHTOOL_SRXFHINDIR
++#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH))
+ .set_rxfh = igb_set_rxfh,
+- .get_channels = igb_get_channels,
+- .set_channels = igb_set_channels,
+- .begin = igb_ethtool_begin,
+- .complete = igb_ethtool_complete,
++#else
++ .set_rxfh_indir = igb_set_rxfh_indir,
++#endif /* HAVE_ETHTOOL_GSRSSH */
++#endif /* ETHTOOL_SRXFHINDIR */
++#ifdef ETHTOOL_GCHANNELS
++ .get_channels = igb_get_channels,
++#endif /* ETHTOOL_GCHANNELS */
++#ifdef ETHTOOL_SCHANNELS
++ .set_channels = igb_set_channels,
++#endif /* ETHTOOL_SCHANNELS */
++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
++#ifdef ETHTOOL_GRXFH
++ .get_rxnfc = igb_get_rxnfc,
++ .set_rxnfc = igb_set_rxnfc,
++#endif
++};
++
++#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
++static const struct ethtool_ops_ext igb_ethtool_ops_ext = {
++ .size = sizeof(struct ethtool_ops_ext),
++ .get_ts_info = igb_get_ts_info,
++ .set_phys_id = igb_set_phys_id,
++ .get_eee = igb_get_eee,
++ .set_eee = igb_set_eee,
++#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
++ .get_rxfh_indir_size = igb_get_rxfh_indir_size,
++#endif /* HAVE_ETHTOOL_GRSFHINDIR_SIZE */
++ .get_rxfh_indir = igb_get_rxfh_indir,
++ .set_rxfh_indir = igb_set_rxfh_indir,
++ .get_channels = igb_get_channels,
++ .set_channels = igb_set_channels,
+ };
+
+ void igb_set_ethtool_ops(struct net_device *netdev)
+ {
+- netdev->ethtool_ops = &igb_ethtool_ops;
++ SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
++ set_ethtool_ops_ext(netdev, &igb_ethtool_ops_ext);
+ }
++#else
++void igb_set_ethtool_ops(struct net_device *netdev)
++{
++ /* have to "undeclare" const on this struct to remove warnings */
++#ifndef ETHTOOL_OPS_COMPAT
++ netdev->ethtool_ops = (struct ethtool_ops *)&igb_ethtool_ops;
++#else
++ SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igb_ethtool_ops);
++#endif /* SET_ETHTOOL_OPS */
++}
++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
++#endif /* SIOCETHTOOL */
++
+diff -Nu a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
+--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c 2016-11-14 14:32:08.579567168 +0000
+@@ -1,30 +1,31 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
+
+ #include "igb.h"
+ #include "e1000_82575.h"
+ #include "e1000_hw.h"
+-
++#ifdef IGB_HWMON
+ #include
+ #include
+ #include
+@@ -34,28 +35,29 @@
+ #include
+ #include
+
+-#ifdef CONFIG_IGB_HWMON
++#ifdef HAVE_I2C_SUPPORT
+ static struct i2c_board_info i350_sensor_info = {
+ I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
+ };
++#endif /* HAVE_I2C_SUPPORT */
+
+ /* hwmon callback functions */
+ static ssize_t igb_hwmon_show_location(struct device *dev,
+- struct device_attribute *attr,
+- char *buf)
++ struct device_attribute *attr,
++ char *buf)
+ {
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+- dev_attr);
++ dev_attr);
+ return sprintf(buf, "loc%u\n",
+ igb_attr->sensor->location);
+ }
+
+ static ssize_t igb_hwmon_show_temp(struct device *dev,
+- struct device_attribute *attr,
+- char *buf)
++ struct device_attribute *attr,
++ char *buf)
+ {
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+- dev_attr);
++ dev_attr);
+ unsigned int value;
+
+ /* reset the temp field */
+@@ -70,11 +72,11 @@
+ }
+
+ static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
+- struct device_attribute *attr,
+- char *buf)
++ struct device_attribute *attr,
++ char *buf)
+ {
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+- dev_attr);
++ dev_attr);
+ unsigned int value = igb_attr->sensor->caution_thresh;
+
+ /* display millidegree */
+@@ -84,11 +86,11 @@
+ }
+
+ static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
+- struct device_attribute *attr,
+- char *buf)
++ struct device_attribute *attr,
++ char *buf)
+ {
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+- dev_attr);
++ dev_attr);
+ unsigned int value = igb_attr->sensor->max_op_thresh;
+
+ /* display millidegree */
+@@ -107,35 +109,34 @@
+ * the data structures we need to get the data to display.
+ */
+ static int igb_add_hwmon_attr(struct igb_adapter *adapter,
+- unsigned int offset, int type)
+-{
++ unsigned int offset, int type) {
+ int rc;
+ unsigned int n_attr;
+ struct hwmon_attr *igb_attr;
+
+- n_attr = adapter->igb_hwmon_buff->n_hwmon;
+- igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr];
++ n_attr = adapter->igb_hwmon_buff.n_hwmon;
++ igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr];
+
+ switch (type) {
+ case IGB_HWMON_TYPE_LOC:
+ igb_attr->dev_attr.show = igb_hwmon_show_location;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+- "temp%u_label", offset + 1);
++ "temp%u_label", offset);
+ break;
+ case IGB_HWMON_TYPE_TEMP:
+ igb_attr->dev_attr.show = igb_hwmon_show_temp;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+- "temp%u_input", offset + 1);
++ "temp%u_input", offset);
+ break;
+ case IGB_HWMON_TYPE_CAUTION:
+ igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+- "temp%u_max", offset + 1);
++ "temp%u_max", offset);
+ break;
+ case IGB_HWMON_TYPE_MAX:
+ igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+- "temp%u_crit", offset + 1);
++ "temp%u_crit", offset);
+ break;
+ default:
+ rc = -EPERM;
+@@ -150,16 +151,30 @@
+ igb_attr->dev_attr.attr.mode = S_IRUGO;
+ igb_attr->dev_attr.attr.name = igb_attr->name;
+ sysfs_attr_init(&igb_attr->dev_attr.attr);
++ rc = device_create_file(&adapter->pdev->dev,
++ &igb_attr->dev_attr);
++ if (rc == 0)
++ ++adapter->igb_hwmon_buff.n_hwmon;
+
+- adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr;
+-
+- ++adapter->igb_hwmon_buff->n_hwmon;
+-
+- return 0;
++ return rc;
+ }
+
+ static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
+ {
++ int i;
++
++ if (adapter == NULL)
++ return;
++
++ for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) {
++ device_remove_file(&adapter->pdev->dev,
++ &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
++ }
++
++ kfree(adapter->igb_hwmon_buff.hwmon_list);
++
++ if (adapter->igb_hwmon_buff.device)
++ hwmon_device_unregister(adapter->igb_hwmon_buff.device);
+ }
+
+ /* called from igb_main.c */
+@@ -171,11 +186,13 @@
+ /* called from igb_main.c */
+ int igb_sysfs_init(struct igb_adapter *adapter)
+ {
+- struct hwmon_buff *igb_hwmon;
+- struct i2c_client *client;
+- struct device *hwmon_dev;
++ struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
+ unsigned int i;
++ int n_attrs;
+ int rc = 0;
++#ifdef HAVE_I2C_SUPPORT
++ struct i2c_client *client = NULL;
++#endif /* HAVE_I2C_SUPPORT */
+
+ /* If this method isn't defined we don't support thermals */
+ if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
+@@ -183,16 +200,35 @@
+
+ /* Don't create thermal hwmon interface if no sensors present */
+ rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
+- if (rc)
++ if (rc)
++ goto exit;
++#ifdef HAVE_I2C_SUPPORT
++ /* init i2c_client */
++ client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
++ if (client == NULL) {
++ dev_info(&adapter->pdev->dev,
++ "Failed to create new i2c device..\n");
+ goto exit;
++ }
++ adapter->i2c_client = client;
++#endif /* HAVE_I2C_SUPPORT */
+
+- igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon),
+- GFP_KERNEL);
+- if (!igb_hwmon) {
++ /* Allocation space for max attributes
++ * max num sensors * values (loc, temp, max, caution)
++ */
++ n_attrs = E1000_MAX_SENSORS * 4;
++ igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
++ GFP_KERNEL);
++ if (!igb_hwmon->hwmon_list) {
+ rc = -ENOMEM;
+- goto exit;
++ goto err;
++ }
++
++ igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
++ if (IS_ERR(igb_hwmon->device)) {
++ rc = PTR_ERR(igb_hwmon->device);
++ goto err;
+ }
+- adapter->igb_hwmon_buff = igb_hwmon;
+
+ for (i = 0; i < E1000_MAX_SENSORS; i++) {
+
+@@ -204,39 +240,11 @@
+
+ /* Bail if any hwmon attr struct fails to initialize */
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
++ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
++ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
++ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+ if (rc)
+- goto exit;
+- rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
+- if (rc)
+- goto exit;
+- rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
+- if (rc)
+- goto exit;
+- rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+- if (rc)
+- goto exit;
+- }
+-
+- /* init i2c_client */
+- client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+- if (client == NULL) {
+- dev_info(&adapter->pdev->dev,
+- "Failed to create new i2c device.\n");
+- rc = -ENODEV;
+- goto exit;
+- }
+- adapter->i2c_client = client;
+-
+- igb_hwmon->groups[0] = &igb_hwmon->group;
+- igb_hwmon->group.attrs = igb_hwmon->attrs;
+-
+- hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+- client->name,
+- igb_hwmon,
+- igb_hwmon->groups);
+- if (IS_ERR(hwmon_dev)) {
+- rc = PTR_ERR(hwmon_dev);
+- goto err;
++ goto err;
+ }
+
+ goto exit;
+@@ -246,4 +254,4 @@
+ exit:
+ return rc;
+ }
+-#endif
++#endif /* IGB_HWMON */
+diff -Nu a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+--- a/drivers/net/ethernet/intel/igb/igb_main.c 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/igb_main.c 2016-11-14 14:32:08.579567168 +0000
+@@ -1,113 +1,114 @@
+-/* Intel(R) Gigabit Ethernet Linux driver
+- * Copyright(c) 2007-2014 Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- *
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * e1000-devel Mailing List
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- */
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
+
+-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
+
+ #include
+ #include
+ #include
+-#include
+ #include
+ #include
+ #include
+-#include
+-#include
++#include
++#ifdef NETIF_F_TSO
+ #include
++#ifdef NETIF_F_TSO6
++#include
+ #include
+-#include
++#endif
++#endif
++#ifdef SIOCGMIIPHY
+ #include
++#endif
++#ifdef SIOCETHTOOL
+ #include
+-#include
++#endif
+ #include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
++#ifdef CONFIG_PM_RUNTIME
+ #include
+-#ifdef CONFIG_IGB_DCA
+-#include
+-#endif
+-#include
++#endif /* CONFIG_PM_RUNTIME */
++
++#include
+ #include "igb.h"
++#include "igb_vmdq.h"
++
++#if defined(DEBUG) || defined(DEBUG_DUMP) || defined(DEBUG_ICR) \
++ || defined(DEBUG_ITR)
++#define DRV_DEBUG "_debug"
++#else
++#define DRV_DEBUG
++#endif
++#define DRV_HW_PERF
++#define VERSION_SUFFIX
+
+ #define MAJ 5
+-#define MIN 0
+-#define BUILD 5
+-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+-__stringify(BUILD) "-k"
++#define MIN 3
++#define BUILD 5.4
++#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "."\
++ __stringify(BUILD) VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF
++
+ char igb_driver_name[] = "igb";
+ char igb_driver_version[] = DRV_VERSION;
+ static const char igb_driver_string[] =
+ "Intel(R) Gigabit Ethernet Network Driver";
+ static const char igb_copyright[] =
+- "Copyright (c) 2007-2014 Intel Corporation.";
+-
+-static const struct e1000_info *igb_info_tbl[] = {
+- [board_82575] = &e1000_82575_info,
+-};
++ "Copyright (c) 2007-2015 Intel Corporation.";
+
+ static const struct pci_device_id igb_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
+- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES) },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER) },
+ /* required last entry */
+ {0, }
+ };
+@@ -122,84 +123,114 @@
+ static int igb_probe(struct pci_dev *, const struct pci_device_id *);
+ static void igb_remove(struct pci_dev *pdev);
+ static int igb_sw_init(struct igb_adapter *);
+-static int igb_open(struct net_device *);
+-static int igb_close(struct net_device *);
+ static void igb_configure(struct igb_adapter *);
+ static void igb_configure_tx(struct igb_adapter *);
+ static void igb_configure_rx(struct igb_adapter *);
+ static void igb_clean_all_tx_rings(struct igb_adapter *);
+ static void igb_clean_all_rx_rings(struct igb_adapter *);
+ static void igb_clean_tx_ring(struct igb_ring *);
+-static void igb_clean_rx_ring(struct igb_ring *);
+ static void igb_set_rx_mode(struct net_device *);
+ static void igb_update_phy_info(unsigned long);
+ static void igb_watchdog(unsigned long);
+ static void igb_watchdog_task(struct work_struct *);
++static void igb_dma_err_task(struct work_struct *);
++static void igb_dma_err_timer(unsigned long data);
+ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
+-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *stats);
++static struct net_device_stats *igb_get_stats(struct net_device *);
+ static int igb_change_mtu(struct net_device *, int);
++/* void igb_full_sync_mac_table(struct igb_adapter *adapter); */
+ static int igb_set_mac(struct net_device *, void *);
+ static void igb_set_uta(struct igb_adapter *adapter);
+ static irqreturn_t igb_intr(int irq, void *);
+ static irqreturn_t igb_intr_msi(int irq, void *);
+ static irqreturn_t igb_msix_other(int irq, void *);
++static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
+ static irqreturn_t igb_msix_ring(int irq, void *);
+-#ifdef CONFIG_IGB_DCA
++#ifdef IGB_DCA
+ static void igb_update_dca(struct igb_q_vector *);
+ static void igb_setup_dca(struct igb_adapter *);
+-#endif /* CONFIG_IGB_DCA */
++#endif /* IGB_DCA */
+ static int igb_poll(struct napi_struct *, int);
+ static bool igb_clean_tx_irq(struct igb_q_vector *);
+ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
+ static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
+ static void igb_tx_timeout(struct net_device *);
+ static void igb_reset_task(struct work_struct *);
+-static void igb_vlan_mode(struct net_device *netdev,
+- netdev_features_t features);
+-static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
+-static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
++#ifdef HAVE_VLAN_RX_REGISTER
++static void igb_vlan_mode(struct net_device *, struct vlan_group *);
++#endif
++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
++#ifdef NETIF_F_HW_VLAN_CTAG_RX
++static int igb_vlan_rx_add_vid(struct net_device *,
++ __always_unused __be16 proto, u16);
++static int igb_vlan_rx_kill_vid(struct net_device *,
++ __always_unused __be16 proto, u16);
++#else
++static int igb_vlan_rx_add_vid(struct net_device *, u16);
++static int igb_vlan_rx_kill_vid(struct net_device *, u16);
++#endif
++#else
++static void igb_vlan_rx_add_vid(struct net_device *, u16);
++static void igb_vlan_rx_kill_vid(struct net_device *, u16);
++#endif
+ static void igb_restore_vlan(struct igb_adapter *);
+-static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
+ static void igb_ping_all_vfs(struct igb_adapter *);
+ static void igb_msg_task(struct igb_adapter *);
+ static void igb_vmm_control(struct igb_adapter *);
+ static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
+ static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
++static void igb_process_mdd_event(struct igb_adapter *);
++#ifdef IFLA_VF_MAX
+ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos);
+-static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
+- bool setting);
++ bool setting);
++#endif
++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
++static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
++ int min_tx_rate, int tx_rate);
++#else
++static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+ static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi);
+ static void igb_check_vf_rate_limit(struct igb_adapter *);
+-
+-#ifdef CONFIG_PCI_IOV
+-static int igb_vf_configure(struct igb_adapter *adapter, int vf);
+-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
+ #endif
+-
++static int igb_vf_configure(struct igb_adapter *adapter, int vf);
+ #ifdef CONFIG_PM
+-#ifdef CONFIG_PM_SLEEP
+-static int igb_suspend(struct device *);
+-#endif
+-static int igb_resume(struct device *);
++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
++static int igb_suspend(struct device *dev);
++static int igb_resume(struct device *dev);
+ #ifdef CONFIG_PM_RUNTIME
+ static int igb_runtime_suspend(struct device *dev);
+ static int igb_runtime_resume(struct device *dev);
+ static int igb_runtime_idle(struct device *dev);
+-#endif
++#endif /* CONFIG_PM_RUNTIME */
+ static const struct dev_pm_ops igb_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
++#ifdef CONFIG_PM_RUNTIME
+ SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
+ igb_runtime_idle)
++#endif /* CONFIG_PM_RUNTIME */
+ };
+-#endif
++#else
++static int igb_suspend(struct pci_dev *pdev, pm_message_t state);
++static int igb_resume(struct pci_dev *pdev);
++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
++#endif /* CONFIG_PM */
++#ifndef USE_REBOOT_NOTIFIER
+ static void igb_shutdown(struct pci_dev *);
+-static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+-#ifdef CONFIG_IGB_DCA
++#else
++static int igb_notify_reboot(struct notifier_block *, unsigned long, void *);
++static struct notifier_block igb_notifier_reboot = {
++ .notifier_call = igb_notify_reboot,
++ .next = NULL,
++ .priority = 0
++};
++#endif
++#ifdef IGB_DCA
+ static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
+ static struct notifier_block dca_notifier = {
+ .notifier_call = igb_notify_dca,
+@@ -211,462 +242,87 @@
+ /* for netdump / net console */
+ static void igb_netpoll(struct net_device *);
+ #endif
+-#ifdef CONFIG_PCI_IOV
+-static unsigned int max_vfs;
+-module_param(max_vfs, uint, 0);
+-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
+-#endif /* CONFIG_PCI_IOV */
+
++#ifdef HAVE_PCI_ERS
+ static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
+ static void igb_io_resume(struct pci_dev *);
+
+-static const struct pci_error_handlers igb_err_handler = {
++static struct pci_error_handlers igb_err_handler = {
+ .error_detected = igb_io_error_detected,
+ .slot_reset = igb_io_slot_reset,
+ .resume = igb_io_resume,
+ };
++#endif
+
++static void igb_init_fw(struct igb_adapter *adapter);
+ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
+
+ static struct pci_driver igb_driver = {
+ .name = igb_driver_name,
+ .id_table = igb_pci_tbl,
+ .probe = igb_probe,
+- .remove = igb_remove,
++ .remove = __devexit_p(igb_remove),
+ #ifdef CONFIG_PM
++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+ .driver.pm = &igb_pm_ops,
+-#endif
++#else
++ .suspend = igb_suspend,
++ .resume = igb_resume,
++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
++#endif /* CONFIG_PM */
++#ifndef USE_REBOOT_NOTIFIER
+ .shutdown = igb_shutdown,
+- .sriov_configure = igb_pci_sriov_configure,
++#endif
++#ifdef HAVE_PCI_ERS
+ .err_handler = &igb_err_handler
++#endif
+ };
+
++/* u32 e1000_read_reg(struct e1000_hw *hw, u32 reg); */
++
+ MODULE_AUTHOR("Intel Corporation, ");
+ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION(DRV_VERSION);
+
+-#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+-static int debug = -1;
+-module_param(debug, int, 0);
+-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+-
+-struct igb_reg_info {
+- u32 ofs;
+- char *name;
+-};
+-
+-static const struct igb_reg_info igb_reg_info_tbl[] = {
+-
+- /* General Registers */
+- {E1000_CTRL, "CTRL"},
+- {E1000_STATUS, "STATUS"},
+- {E1000_CTRL_EXT, "CTRL_EXT"},
+-
+- /* Interrupt Registers */
+- {E1000_ICR, "ICR"},
+-
+- /* RX Registers */
+- {E1000_RCTL, "RCTL"},
+- {E1000_RDLEN(0), "RDLEN"},
+- {E1000_RDH(0), "RDH"},
+- {E1000_RDT(0), "RDT"},
+- {E1000_RXDCTL(0), "RXDCTL"},
+- {E1000_RDBAL(0), "RDBAL"},
+- {E1000_RDBAH(0), "RDBAH"},
+-
+- /* TX Registers */
+- {E1000_TCTL, "TCTL"},
+- {E1000_TDBAL(0), "TDBAL"},
+- {E1000_TDBAH(0), "TDBAH"},
+- {E1000_TDLEN(0), "TDLEN"},
+- {E1000_TDH(0), "TDH"},
+- {E1000_TDT(0), "TDT"},
+- {E1000_TXDCTL(0), "TXDCTL"},
+- {E1000_TDFH, "TDFH"},
+- {E1000_TDFT, "TDFT"},
+- {E1000_TDFHS, "TDFHS"},
+- {E1000_TDFPC, "TDFPC"},
+-
+- /* List Terminator */
+- {}
+-};
+-
+-/* igb_regdump - register printout routine */
+-static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
+-{
+- int n = 0;
+- char rname[16];
+- u32 regs[8];
+-
+- switch (reginfo->ofs) {
+- case E1000_RDLEN(0):
+- for (n = 0; n < 4; n++)
+- regs[n] = rd32(E1000_RDLEN(n));
+- break;
+- case E1000_RDH(0):
+- for (n = 0; n < 4; n++)
+- regs[n] = rd32(E1000_RDH(n));
+- break;
+- case E1000_RDT(0):
+- for (n = 0; n < 4; n++)
+- regs[n] = rd32(E1000_RDT(n));
+- break;
+- case E1000_RXDCTL(0):
+- for (n = 0; n < 4; n++)
+- regs[n] = rd32(E1000_RXDCTL(n));
+- break;
+- case E1000_RDBAL(0):
+- for (n = 0; n < 4; n++)
+- regs[n] = rd32(E1000_RDBAL(n));
+- break;
+- case E1000_RDBAH(0):
+- for (n = 0; n < 4; n++)
+- regs[n] = rd32(E1000_RDBAH(n));
+- break;
+- case E1000_TDBAL(0):
+- for (n = 0; n < 4; n++)
+- regs[n] = rd32(E1000_RDBAL(n));
+- break;
+- case E1000_TDBAH(0):
+- for (n = 0; n < 4; n++)
+- regs[n] = rd32(E1000_TDBAH(n));
+- break;
+- case E1000_TDLEN(0):
+- for (n = 0; n < 4; n++)
+- regs[n] = rd32(E1000_TDLEN(n));
+- break;
+- case E1000_TDH(0):
+- for (n = 0; n < 4; n++)
+- regs[n] = rd32(E1000_TDH(n));
+- break;
+- case E1000_TDT(0):
+- for (n = 0; n < 4; n++)
+- regs[n] = rd32(E1000_TDT(n));
+- break;
+- case E1000_TXDCTL(0):
+- for (n = 0; n < 4; n++)
+- regs[n] = rd32(E1000_TXDCTL(n));
+- break;
+- default:
+- pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
+- return;
+- }
+-
+- snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+- pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+- regs[2], regs[3]);
+-}
+-
+-/* igb_dump - Print registers, Tx-rings and Rx-rings */
+-static void igb_dump(struct igb_adapter *adapter)
+-{
+- struct net_device *netdev = adapter->netdev;
+- struct e1000_hw *hw = &adapter->hw;
+- struct igb_reg_info *reginfo;
+- struct igb_ring *tx_ring;
+- union e1000_adv_tx_desc *tx_desc;
+- struct my_u0 { u64 a; u64 b; } *u0;
+- struct igb_ring *rx_ring;
+- union e1000_adv_rx_desc *rx_desc;
+- u32 staterr;
+- u16 i, n;
+-
+- if (!netif_msg_hw(adapter))
+- return;
+-
+- /* Print netdevice Info */
+- if (netdev) {
+- dev_info(&adapter->pdev->dev, "Net device Info\n");
+- pr_info("Device Name state trans_start last_rx\n");
+- pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
+- netdev->state, netdev->trans_start, netdev->last_rx);
+- }
+-
+- /* Print Registers */
+- dev_info(&adapter->pdev->dev, "Register Dump\n");
+- pr_info(" Register Name Value\n");
+- for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
+- reginfo->name; reginfo++) {
+- igb_regdump(hw, reginfo);
+- }
+-
+- /* Print TX Ring Summary */
+- if (!netdev || !netif_running(netdev))
+- goto exit;
+-
+- dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+- pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
+- for (n = 0; n < adapter->num_tx_queues; n++) {
+- struct igb_tx_buffer *buffer_info;
+- tx_ring = adapter->tx_ring[n];
+- buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+- pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+- n, tx_ring->next_to_use, tx_ring->next_to_clean,
+- (u64)dma_unmap_addr(buffer_info, dma),
+- dma_unmap_len(buffer_info, len),
+- buffer_info->next_to_watch,
+- (u64)buffer_info->time_stamp);
+- }
+-
+- /* Print TX Rings */
+- if (!netif_msg_tx_done(adapter))
+- goto rx_ring_summary;
+-
+- dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+-
+- /* Transmit Descriptor Formats
+- *
+- * Advanced Transmit Descriptor
+- * +--------------------------------------------------------------+
+- * 0 | Buffer Address [63:0] |
+- * +--------------------------------------------------------------+
+- * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
+- * +--------------------------------------------------------------+
+- * 63 46 45 40 39 38 36 35 32 31 24 15 0
+- */
+-
+- for (n = 0; n < adapter->num_tx_queues; n++) {
+- tx_ring = adapter->tx_ring[n];
+- pr_info("------------------------------------\n");
+- pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+- pr_info("------------------------------------\n");
+- pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
+-
+- for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+- const char *next_desc;
+- struct igb_tx_buffer *buffer_info;
+- tx_desc = IGB_TX_DESC(tx_ring, i);
+- buffer_info = &tx_ring->tx_buffer_info[i];
+- u0 = (struct my_u0 *)tx_desc;
+- if (i == tx_ring->next_to_use &&
+- i == tx_ring->next_to_clean)
+- next_desc = " NTC/U";
+- else if (i == tx_ring->next_to_use)
+- next_desc = " NTU";
+- else if (i == tx_ring->next_to_clean)
+- next_desc = " NTC";
+- else
+- next_desc = "";
+-
+- pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
+- i, le64_to_cpu(u0->a),
+- le64_to_cpu(u0->b),
+- (u64)dma_unmap_addr(buffer_info, dma),
+- dma_unmap_len(buffer_info, len),
+- buffer_info->next_to_watch,
+- (u64)buffer_info->time_stamp,
+- buffer_info->skb, next_desc);
+-
+- if (netif_msg_pktdata(adapter) && buffer_info->skb)
+- print_hex_dump(KERN_INFO, "",
+- DUMP_PREFIX_ADDRESS,
+- 16, 1, buffer_info->skb->data,
+- dma_unmap_len(buffer_info, len),
+- true);
+- }
+- }
+-
+- /* Print RX Rings Summary */
+-rx_ring_summary:
+- dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+- pr_info("Queue [NTU] [NTC]\n");
+- for (n = 0; n < adapter->num_rx_queues; n++) {
+- rx_ring = adapter->rx_ring[n];
+- pr_info(" %5d %5X %5X\n",
+- n, rx_ring->next_to_use, rx_ring->next_to_clean);
+- }
+-
+- /* Print RX Rings */
+- if (!netif_msg_rx_status(adapter))
+- goto exit;
+-
+- dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+-
+- /* Advanced Receive Descriptor (Read) Format
+- * 63 1 0
+- * +-----------------------------------------------------+
+- * 0 | Packet Buffer Address [63:1] |A0/NSE|
+- * +----------------------------------------------+------+
+- * 8 | Header Buffer Address [63:1] | DD |
+- * +-----------------------------------------------------+
+- *
+- *
+- * Advanced Receive Descriptor (Write-Back) Format
+- *
+- * 63 48 47 32 31 30 21 20 17 16 4 3 0
+- * +------------------------------------------------------+
+- * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+- * | Checksum Ident | | | | Type | Type |
+- * +------------------------------------------------------+
+- * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+- * +------------------------------------------------------+
+- * 63 48 47 32 31 20 19 0
+- */
+-
+- for (n = 0; n < adapter->num_rx_queues; n++) {
+- rx_ring = adapter->rx_ring[n];
+- pr_info("------------------------------------\n");
+- pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+- pr_info("------------------------------------\n");
+- pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+- pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
+-
+- for (i = 0; i < rx_ring->count; i++) {
+- const char *next_desc;
+- struct igb_rx_buffer *buffer_info;
+- buffer_info = &rx_ring->rx_buffer_info[i];
+- rx_desc = IGB_RX_DESC(rx_ring, i);
+- u0 = (struct my_u0 *)rx_desc;
+- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+-
+- if (i == rx_ring->next_to_use)
+- next_desc = " NTU";
+- else if (i == rx_ring->next_to_clean)
+- next_desc = " NTC";
+- else
+- next_desc = "";
+-
+- if (staterr & E1000_RXD_STAT_DD) {
+- /* Descriptor Done */
+- pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
+- "RWB", i,
+- le64_to_cpu(u0->a),
+- le64_to_cpu(u0->b),
+- next_desc);
+- } else {
+- pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
+- "R ", i,
+- le64_to_cpu(u0->a),
+- le64_to_cpu(u0->b),
+- (u64)buffer_info->dma,
+- next_desc);
+-
+- if (netif_msg_pktdata(adapter) &&
+- buffer_info->dma && buffer_info->page) {
+- print_hex_dump(KERN_INFO, "",
+- DUMP_PREFIX_ADDRESS,
+- 16, 1,
+- page_address(buffer_info->page) +
+- buffer_info->page_offset,
+- IGB_RX_BUFSZ, true);
+- }
+- }
+- }
+- }
+-
+-exit:
+- return;
+-}
+-
+-/**
+- * igb_get_i2c_data - Reads the I2C SDA data bit
+- * @hw: pointer to hardware structure
+- * @i2cctl: Current value of I2CCTL register
+- *
+- * Returns the I2C data bit value
+- **/
+-static int igb_get_i2c_data(void *data)
++static void igb_vfta_set(struct igb_adapter *adapter, u32 vid, bool add)
+ {
+- struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+- s32 i2cctl = rd32(E1000_I2CPARAMS);
++ struct e1000_host_mng_dhcp_cookie *mng_cookie = &hw->mng_cookie;
++ u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
++ u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
++ u32 vfta;
+
+- return !!(i2cctl & E1000_I2C_DATA_IN);
+-}
++ /*
++ * if this is the management vlan the only option is to add it in so
++ * that the management pass through will continue to work
++ */
++ if ((mng_cookie->status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
++ (vid == mng_cookie->vlan_id))
++ add = TRUE;
+
+-/**
+- * igb_set_i2c_data - Sets the I2C data bit
+- * @data: pointer to hardware structure
+- * @state: I2C data value (0 or 1) to set
+- *
+- * Sets the I2C data bit
+- **/
+-static void igb_set_i2c_data(void *data, int state)
+-{
+- struct igb_adapter *adapter = (struct igb_adapter *)data;
+- struct e1000_hw *hw = &adapter->hw;
+- s32 i2cctl = rd32(E1000_I2CPARAMS);
++ vfta = adapter->shadow_vfta[index];
+
+- if (state)
+- i2cctl |= E1000_I2C_DATA_OUT;
++ if (add)
++ vfta |= mask;
+ else
+- i2cctl &= ~E1000_I2C_DATA_OUT;
++ vfta &= ~mask;
+
+- i2cctl &= ~E1000_I2C_DATA_OE_N;
+- i2cctl |= E1000_I2C_CLK_OE_N;
+- wr32(E1000_I2CPARAMS, i2cctl);
+- wrfl();
+-
+-}
+-
+-/**
+- * igb_set_i2c_clk - Sets the I2C SCL clock
+- * @data: pointer to hardware structure
+- * @state: state to set clock
+- *
+- * Sets the I2C clock line to state
+- **/
+-static void igb_set_i2c_clk(void *data, int state)
+-{
+- struct igb_adapter *adapter = (struct igb_adapter *)data;
+- struct e1000_hw *hw = &adapter->hw;
+- s32 i2cctl = rd32(E1000_I2CPARAMS);
+-
+- if (state) {
+- i2cctl |= E1000_I2C_CLK_OUT;
+- i2cctl &= ~E1000_I2C_CLK_OE_N;
+- } else {
+- i2cctl &= ~E1000_I2C_CLK_OUT;
+- i2cctl &= ~E1000_I2C_CLK_OE_N;
+- }
+- wr32(E1000_I2CPARAMS, i2cctl);
+- wrfl();
+-}
+-
+-/**
+- * igb_get_i2c_clk - Gets the I2C SCL clock state
+- * @data: pointer to hardware structure
+- *
+- * Gets the I2C clock state
+- **/
+-static int igb_get_i2c_clk(void *data)
+-{
+- struct igb_adapter *adapter = (struct igb_adapter *)data;
+- struct e1000_hw *hw = &adapter->hw;
+- s32 i2cctl = rd32(E1000_I2CPARAMS);
+-
+- return !!(i2cctl & E1000_I2C_CLK_IN);
++ igb_e1000_write_vfta(hw, index, vfta);
++ adapter->shadow_vfta[index] = vfta;
+ }
+
+-static const struct i2c_algo_bit_data igb_i2c_algo = {
+- .setsda = igb_set_i2c_data,
+- .setscl = igb_set_i2c_clk,
+- .getsda = igb_get_i2c_data,
+- .getscl = igb_get_i2c_clk,
+- .udelay = 5,
+- .timeout = 20,
+-};
+-
+-/**
+- * igb_get_hw_dev - return device
+- * @hw: pointer to hardware structure
+- *
+- * used by hardware layer to print debugging information
+- **/
+-struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
+-{
+- struct igb_adapter *adapter = hw->back;
+- return adapter->netdev;
+-}
++static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
++module_param(debug, int, 0);
++MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)");
+
+ /**
+- * igb_init_module - Driver Registration Routine
++ * igb_init_module - Driver Registration Routine
+ *
+- * igb_init_module is the first routine called when the driver is
+- * loaded. All it does is register with the PCI subsystem.
++ * igb_init_module is the first routine called when the driver is
++ * loaded. All it does is register with the PCI subsystem.
+ **/
+ static int __init igb_init_module(void)
+ {
+@@ -674,76 +330,89 @@
+
+ pr_info("%s - version %s\n",
+ igb_driver_string, igb_driver_version);
++
+ pr_info("%s\n", igb_copyright);
++#ifdef IGB_HWMON
++/* only use IGB_PROCFS if IGB_HWMON is not defined */
++#else
++#ifdef IGB_PROCFS
++ if (igb_procfs_topdir_init())
++ pr_info("Procfs failed to initialize topdir\n");
++#endif /* IGB_PROCFS */
++#endif /* IGB_HWMON */
+
+-#ifdef CONFIG_IGB_DCA
++#ifdef IGB_DCA
+ dca_register_notify(&dca_notifier);
+ #endif
+ ret = pci_register_driver(&igb_driver);
++#ifdef USE_REBOOT_NOTIFIER
++ if (ret >= 0)
++ register_reboot_notifier(&igb_notifier_reboot);
++#endif
+ return ret;
+ }
+
+ module_init(igb_init_module);
+
+ /**
+- * igb_exit_module - Driver Exit Cleanup Routine
++ * igb_exit_module - Driver Exit Cleanup Routine
+ *
+- * igb_exit_module is called just before the driver is removed
+- * from memory.
++ * igb_exit_module is called just before the driver is removed
++ * from memory.
+ **/
+ static void __exit igb_exit_module(void)
+ {
+-#ifdef CONFIG_IGB_DCA
++#ifdef IGB_DCA
+ dca_unregister_notify(&dca_notifier);
+ #endif
++#ifdef USE_REBOOT_NOTIFIER
++ unregister_reboot_notifier(&igb_notifier_reboot);
++#endif
+ pci_unregister_driver(&igb_driver);
++
++#ifdef IGB_HWMON
++/* only compile IGB_PROCFS if IGB_HWMON is not defined */
++#else
++#ifdef IGB_PROCFS
++ igb_procfs_topdir_exit();
++#endif /* IGB_PROCFS */
++#endif /* IGB_HWMON */
+ }
+
+ module_exit(igb_exit_module);
+
+ #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
+ /**
+- * igb_cache_ring_register - Descriptor ring to register mapping
+- * @adapter: board private structure to initialize
++ * igb_cache_ring_register - Descriptor ring to register mapping
++ * @adapter: board private structure to initialize
+ *
+- * Once we know the feature-set enabled for the device, we'll cache
+- * the register offset the descriptor ring is assigned to.
++ * Once we know the feature-set enabled for the device, we'll cache
++ * the register offset the descriptor ring is assigned to.
+ **/
+ static void igb_cache_ring_register(struct igb_adapter *adapter)
+ {
+ int i = 0, j = 0;
+ u32 rbase_offset = adapter->vfs_allocated_count;
+
+- switch (adapter->hw.mac.type) {
+- case e1000_82576:
++ if (adapter->hw.mac.type == e1000_82576) {
+ /* The queues are allocated for virtualization such that VF 0
+ * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
+ * In order to avoid collision we start at the first free queue
+ * and continue consuming queues in the same sequence
+ */
+- if (adapter->vfs_allocated_count) {
++ if ((adapter->rss_queues > 1) && adapter->vmdq_pools) {
+ for (; i < adapter->rss_queues; i++)
+ adapter->rx_ring[i]->reg_idx = rbase_offset +
+- Q_IDX_82576(i);
++ Q_IDX_82576(i);
+ }
+- /* Fall through */
+- case e1000_82575:
+- case e1000_82580:
+- case e1000_i350:
+- case e1000_i354:
+- case e1000_i210:
+- case e1000_i211:
+- /* Fall through */
+- default:
+- for (; i < adapter->num_rx_queues; i++)
+- adapter->rx_ring[i]->reg_idx = rbase_offset + i;
+- for (; j < adapter->num_tx_queues; j++)
+- adapter->tx_ring[j]->reg_idx = rbase_offset + j;
+- break;
+ }
++ for (; i < adapter->num_rx_queues; i++)
++ adapter->rx_ring[i]->reg_idx = rbase_offset + i;
++ for (; j < adapter->num_tx_queues; j++)
++ adapter->tx_ring[j]->reg_idx = rbase_offset + j;
+ }
+
+-u32 igb_rd32(struct e1000_hw *hw, u32 reg)
++u32 e1000_read_reg(struct e1000_hw *hw, u32 reg)
+ {
+ struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
+ u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+@@ -757,6 +426,7 @@
+ /* reads should not return all F's */
+ if (!(~value) && (!reg || !(~readl(hw_addr)))) {
+ struct net_device *netdev = igb->netdev;
++
+ hw->hw_addr = NULL;
+ netif_device_detach(netdev);
+ netdev_err(netdev, "PCIe link lost, device now detached\n");
+@@ -765,6 +435,42 @@
+ return value;
+ }
+
++static void igb_configure_lli(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u16 port;
++
++ /* LLI should only be enabled for MSI-X or MSI interrupts */
++ if (!adapter->msix_entries && !(adapter->flags & IGB_FLAG_HAS_MSI))
++ return;
++
++ if (adapter->lli_port) {
++ /* use filter 0 for port */
++ port = htons((u16)adapter->lli_port);
++ E1000_WRITE_REG(hw, E1000_IMIR(0),
++ (port | E1000_IMIR_PORT_IM_EN));
++ E1000_WRITE_REG(hw, E1000_IMIREXT(0),
++ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
++ }
++
++ if (adapter->flags & IGB_FLAG_LLI_PUSH) {
++ /* use filter 1 for push flag */
++ E1000_WRITE_REG(hw, E1000_IMIR(1),
++ (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
++ E1000_WRITE_REG(hw, E1000_IMIREXT(1),
++ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_PSH));
++ }
++
++ if (adapter->lli_size) {
++ /* use filter 2 for size */
++ E1000_WRITE_REG(hw, E1000_IMIR(2),
++ (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
++ E1000_WRITE_REG(hw, E1000_IMIREXT(2),
++ (adapter->lli_size | E1000_IMIREXT_CTRL_BP));
++ }
++
++}
++
+ /**
+ * igb_write_ivar - configure ivar for given MSI-X vector
+ * @hw: pointer to the HW structure
+@@ -780,7 +486,7 @@
+ static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
+ int index, int offset)
+ {
+- u32 ivar = array_rd32(E1000_IVAR0, index);
++ u32 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+
+ /* clear any bits that are currently set */
+ ivar &= ~((u32)0xFF << offset);
+@@ -788,7 +494,7 @@
+ /* write vector and valid bit */
+ ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
+
+- array_wr32(E1000_IVAR0, index, ivar);
++ E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
+ }
+
+ #define IGB_N0_QUEUE -1
+@@ -816,13 +522,14 @@
+ msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
+ if (tx_queue > IGB_N0_QUEUE)
+ msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
+- if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
++ if (!adapter->msix_entries && msix_vector == 0)
+ msixbm |= E1000_EIMS_OTHER;
+- array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
++ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), msix_vector, msixbm);
+ q_vector->eims_value = msixbm;
+ break;
+ case e1000_82576:
+- /* 82576 uses a table that essentially consists of 2 columns
++ /*
++ * 82576 uses a table that essentially consists of 2 columns
+ * with 8 rows. The ordering is column-major so we use the
+ * lower 3 bits as the row index, and the 4th bit as the
+ * column offset.
+@@ -842,7 +549,8 @@
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+- /* On 82580 and newer adapters the scheme is similar to 82576
++ /*
++ * On 82580 and newer adapters the scheme is similar to 82576
+ * however instead of ordering column-major we have things
+ * ordered row-major. So we traverse the table by using
+ * bit 0 as the column offset, and the remaining bits as the
+@@ -871,11 +579,10 @@
+ }
+
+ /**
+- * igb_configure_msix - Configure MSI-X hardware
+- * @adapter: board private structure to initialize
++ * igb_configure_msix - Configure MSI-X hardware
+ *
+- * igb_configure_msix sets up the hardware to properly
+- * generate MSI-X interrupts.
++ * igb_configure_msix sets up the hardware to properly
++ * generate MSI-X interrupts.
+ **/
+ static void igb_configure_msix(struct igb_adapter *adapter)
+ {
+@@ -888,7 +595,7 @@
+ /* set vector for other causes, i.e. link changes */
+ switch (hw->mac.type) {
+ case e1000_82575:
+- tmp = rd32(E1000_CTRL_EXT);
++ tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /* enable MSI-X PBA support*/
+ tmp |= E1000_CTRL_EXT_PBA_CLR;
+
+@@ -896,10 +603,11 @@
+ tmp |= E1000_CTRL_EXT_EIAME;
+ tmp |= E1000_CTRL_EXT_IRCA;
+
+- wr32(E1000_CTRL_EXT, tmp);
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
+
+ /* enable msix_other interrupt */
+- array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
++ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), vector++,
++ E1000_EIMS_OTHER);
+ adapter->eims_other = E1000_EIMS_OTHER;
+
+ break;
+@@ -913,15 +621,15 @@
+ /* Turn on MSI-X capability first, or our settings
+ * won't stick. And it will take days to debug.
+ */
+- wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
+- E1000_GPIE_PBA | E1000_GPIE_EIAME |
+- E1000_GPIE_NSICR);
++ E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
++ E1000_GPIE_PBA | E1000_GPIE_EIAME |
++ E1000_GPIE_NSICR);
+
+ /* enable msix_other interrupt */
+ adapter->eims_other = 1 << vector;
+ tmp = (vector++ | E1000_IVAR_VALID) << 8;
+
+- wr32(E1000_IVAR_MISC, tmp);
++ E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmp);
+ break;
+ default:
+ /* do nothing, since nothing else supports MSI-X */
+@@ -933,24 +641,22 @@
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ igb_assign_vector(adapter->q_vector[i], vector++);
+
+- wrfl();
++ E1000_WRITE_FLUSH(hw);
+ }
+
+ /**
+- * igb_request_msix - Initialize MSI-X interrupts
+- * @adapter: board private structure to initialize
++ * igb_request_msix - Initialize MSI-X interrupts
+ *
+- * igb_request_msix allocates MSI-X vectors and requests interrupts from the
+- * kernel.
++ * igb_request_msix allocates MSI-X vectors and requests interrupts from the
++ * kernel.
+ **/
+ static int igb_request_msix(struct igb_adapter *adapter)
+ {
+ struct net_device *netdev = adapter->netdev;
+- struct e1000_hw *hw = &adapter->hw;
+ int i, err = 0, vector = 0, free_vector = 0;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+- igb_msix_other, 0, netdev->name, adapter);
++ &igb_msix_other, 0, netdev->name, adapter);
+ if (err)
+ goto err_out;
+
+@@ -959,7 +665,7 @@
+
+ vector++;
+
+- q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
++ q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
+
+ if (q_vector->rx.ring && q_vector->tx.ring)
+ sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
+@@ -997,11 +703,11 @@
+ }
+
+ /**
+- * igb_free_q_vector - Free memory allocated for specific interrupt vector
+- * @adapter: board private structure to initialize
+- * @v_idx: Index of vector to be freed
++ * igb_free_q_vector - Free memory allocated for specific interrupt vector
++ * @adapter: board private structure to initialize
++ * @v_idx: Index of vector to be freed
+ *
+- * This function frees the memory allocated to the q_vector.
++ * This function frees the memory allocated to the q_vector.
+ **/
+ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
+ {
+@@ -1013,6 +719,10 @@
+ * we must wait a grace period before freeing it.
+ */
+ kfree_rcu(q_vector, rcu);
++
++#ifndef IGB_NO_LRO
++ __skb_queue_purge(&q_vector->lrolist.active);
++#endif
+ }
+
+ /**
+@@ -1027,8 +737,8 @@
+ {
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+- /* Coming from igb_set_interrupt_capability, the vectors are not yet
+- * allocated. So, q_vector is NULL so we should stop here.
++ /* if we're coming from igb_set_interrupt_capability, the vectors are
++ * not yet allocated
+ */
+ if (!q_vector)
+ return;
+@@ -1047,22 +757,25 @@
+ {
+ int v_idx = adapter->num_q_vectors;
+
+- if (adapter->flags & IGB_FLAG_HAS_MSIX)
++ if (adapter->msix_entries) {
+ pci_disable_msix(adapter->pdev);
+- else if (adapter->flags & IGB_FLAG_HAS_MSI)
++ kfree(adapter->msix_entries);
++ adapter->msix_entries = NULL;
++ } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
+ pci_disable_msi(adapter->pdev);
++ }
+
+ while (v_idx--)
+ igb_reset_q_vector(adapter, v_idx);
+ }
+
+ /**
+- * igb_free_q_vectors - Free memory allocated for interrupt vectors
+- * @adapter: board private structure to initialize
++ * igb_free_q_vectors - Free memory allocated for interrupt vectors
++ * @adapter: board private structure to initialize
+ *
+- * This function frees the memory allocated to the q_vectors. In addition if
+- * NAPI is enabled it will delete any references to the NAPI struct prior
+- * to freeing the q_vector.
++ * This function frees the memory allocated to the q_vectors. In addition if
++ * NAPI is enabled it will delete any references to the NAPI struct prior
++ * to freeing the q_vector.
+ **/
+ static void igb_free_q_vectors(struct igb_adapter *adapter)
+ {
+@@ -1079,11 +792,10 @@
+ }
+
+ /**
+- * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
+- * @adapter: board private structure to initialize
++ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
+ *
+- * This function resets the device so that it has 0 Rx queues, Tx queues, and
+- * MSI-X interrupts allocated.
++ * This function resets the device so that it has 0 rx queues, tx queues, and
++ * MSI-X interrupts allocated.
+ */
+ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
+ {
+@@ -1092,108 +804,306 @@
+ }
+
+ /**
+- * igb_set_interrupt_capability - set MSI or MSI-X if supported
+- * @adapter: board private structure to initialize
+- * @msix: boolean value of MSIX capability
++ * igb_process_mdd_event
++ * @adapter - board private structure
+ *
+- * Attempt to configure interrupts using the best available
+- * capabilities of the hardware and kernel.
+- **/
+-static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
++ * Identify a malicious VF, disable the VF TX/RX queues and log a message.
++ */
++static void igb_process_mdd_event(struct igb_adapter *adapter)
+ {
+- int err;
+- int numvecs, i;
+-
+- if (!msix)
+- goto msi_only;
+- adapter->flags |= IGB_FLAG_HAS_MSIX;
+-
+- /* Number of supported queues. */
+- adapter->num_rx_queues = adapter->rss_queues;
+- if (adapter->vfs_allocated_count)
+- adapter->num_tx_queues = 1;
+- else
+- adapter->num_tx_queues = adapter->rss_queues;
++ struct e1000_hw *hw = &adapter->hw;
++ u32 lvmmc, vfte, vfre, mdfb;
++ u8 vf_queue;
+
+- /* start with one vector for every Rx queue */
+- numvecs = adapter->num_rx_queues;
++ lvmmc = E1000_READ_REG(hw, E1000_LVMMC);
++ vf_queue = lvmmc >> 29;
+
+- /* if Tx handler is separate add 1 for every Tx queue */
+- if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
+- numvecs += adapter->num_tx_queues;
+-
+- /* store the number of vectors reserved for queues */
+- adapter->num_q_vectors = numvecs;
+-
+- /* add 1 vector for link status interrupts */
+- numvecs++;
+- for (i = 0; i < numvecs; i++)
+- adapter->msix_entries[i].entry = i;
+-
+- err = pci_enable_msix_range(adapter->pdev,
+- adapter->msix_entries,
+- numvecs,
+- numvecs);
+- if (err > 0)
++ /* VF index cannot be bigger or equal to VFs allocated */
++ if (vf_queue >= adapter->vfs_allocated_count)
+ return;
+
+- igb_reset_interrupt_capability(adapter);
++ netdev_info(adapter->netdev,
++ "VF %d misbehaved. VF queues are disabled. VM misbehavior code is 0x%x\n",
++ vf_queue, lvmmc);
+
+- /* If we can't do MSI-X, try MSI */
+-msi_only:
+- adapter->flags &= ~IGB_FLAG_HAS_MSIX;
+-#ifdef CONFIG_PCI_IOV
+- /* disable SR-IOV for non MSI-X configurations */
+- if (adapter->vf_data) {
+- struct e1000_hw *hw = &adapter->hw;
+- /* disable iov and allow time for transactions to clear */
+- pci_disable_sriov(adapter->pdev);
+- msleep(500);
++ /* Disable VFTE and VFRE related bits */
++ vfte = E1000_READ_REG(hw, E1000_VFTE);
++ vfte &= ~(1 << vf_queue);
++ E1000_WRITE_REG(hw, E1000_VFTE, vfte);
+
+- kfree(adapter->vf_data);
+- adapter->vf_data = NULL;
+- wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+- wrfl();
+- msleep(100);
+- dev_info(&adapter->pdev->dev, "IOV Disabled\n");
+- }
+-#endif
+- adapter->vfs_allocated_count = 0;
+- adapter->rss_queues = 1;
+- adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+- adapter->num_rx_queues = 1;
+- adapter->num_tx_queues = 1;
+- adapter->num_q_vectors = 1;
+- if (!pci_enable_msi(adapter->pdev))
+- adapter->flags |= IGB_FLAG_HAS_MSI;
+-}
++ vfre = E1000_READ_REG(hw, E1000_VFRE);
++ vfre &= ~(1 << vf_queue);
++ E1000_WRITE_REG(hw, E1000_VFRE, vfre);
+
+-static void igb_add_ring(struct igb_ring *ring,
+- struct igb_ring_container *head)
+-{
+- head->ring = ring;
+- head->count++;
++ /* Disable MDFB related bit. Clear on write */
++ mdfb = E1000_READ_REG(hw, E1000_MDFB);
++ mdfb |= (1 << vf_queue);
++ E1000_WRITE_REG(hw, E1000_MDFB, mdfb);
++
++ /* Reset the specific VF */
++ E1000_WRITE_REG(hw, E1000_VTCTRL(vf_queue), E1000_VTCTRL_RST);
+ }
+
+ /**
+- * igb_alloc_q_vector - Allocate memory for a single interrupt vector
+- * @adapter: board private structure to initialize
+- * @v_count: q_vectors allocated on adapter, used for ring interleaving
+- * @v_idx: index of vector in adapter struct
+- * @txr_count: total number of Tx rings to allocate
+- * @txr_idx: index of first Tx ring to allocate
+- * @rxr_count: total number of Rx rings to allocate
+- * @rxr_idx: index of first Rx ring to allocate
++ * igb_disable_mdd
++ * @adapter - board private structure
+ *
+- * We allocate one q_vector. If allocation fails we return -ENOMEM.
++ * Disable MDD behavior in the HW
+ **/
+-static int igb_alloc_q_vector(struct igb_adapter *adapter,
+- int v_count, int v_idx,
+- int txr_count, int txr_idx,
+- int rxr_count, int rxr_idx)
++static void igb_disable_mdd(struct igb_adapter *adapter)
+ {
+- struct igb_q_vector *q_vector;
+- struct igb_ring *ring;
++ struct e1000_hw *hw = &adapter->hw;
++ u32 reg;
++
++ if ((hw->mac.type != e1000_i350) &&
++ (hw->mac.type != e1000_i354))
++ return;
++
++ reg = E1000_READ_REG(hw, E1000_DTXCTL);
++ reg &= (~E1000_DTXCTL_MDP_EN);
++ E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
++}
++
++/**
++ * igb_enable_mdd
++ * @adapter - board private structure
++ *
++ * Enable the HW to detect malicious driver and sends an interrupt to
++ * the driver.
++ **/
++static void igb_enable_mdd(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 reg;
++
++ /* Only available on i350 device */
++ if (hw->mac.type != e1000_i350)
++ return;
++
++ reg = E1000_READ_REG(hw, E1000_DTXCTL);
++ reg |= E1000_DTXCTL_MDP_EN;
++ E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
++}
++
++/**
++ * igb_reset_sriov_capability - disable SR-IOV if enabled
++ *
++ * Attempt to disable single root IO virtualization capabilites present in the
++ * kernel.
++ **/
++static void igb_reset_sriov_capability(struct igb_adapter *adapter)
++{
++ struct pci_dev *pdev = adapter->pdev;
++ struct e1000_hw *hw = &adapter->hw;
++
++ /* reclaim resources allocated to VFs */
++ if (adapter->vf_data) {
++ if (!pci_vfs_assigned(pdev)) {
++ /*
++ * disable iov and allow time for transactions to
++ * clear
++ */
++ pci_disable_sriov(pdev);
++ msleep(500);
++
++ dev_info(pci_dev_to_dev(pdev), "IOV Disabled\n");
++ } else {
++ dev_info(pci_dev_to_dev(pdev),
++ "IOV Not Disabled\n VF(s) are assigned to guests!\n");
++ }
++ /* Disable Malicious Driver Detection */
++ igb_disable_mdd(adapter);
++
++ /* free vf data storage */
++ kfree(adapter->vf_data);
++ adapter->vf_data = NULL;
++
++ /* switch rings back to PF ownership */
++ E1000_WRITE_REG(hw, E1000_IOVCTL,
++ E1000_IOVCTL_REUSE_VFQ);
++ E1000_WRITE_FLUSH(hw);
++ msleep(100);
++ }
++
++ adapter->vfs_allocated_count = 0;
++}
++
++/**
++ * igb_set_sriov_capability - setup SR-IOV if supported
++ *
++ * Attempt to enable single root IO virtualization capabilites present in the
++ * kernel.
++ **/
++static void igb_set_sriov_capability(struct igb_adapter *adapter)
++{
++ struct pci_dev *pdev = adapter->pdev;
++ int old_vfs = 0;
++ int i;
++
++ old_vfs = pci_num_vf(pdev);
++ if (old_vfs) {
++ dev_info(pci_dev_to_dev(pdev),
++ "%d pre-allocated VFs found - override max_vfs setting of %d\n",
++ old_vfs, adapter->vfs_allocated_count);
++ adapter->vfs_allocated_count = old_vfs;
++ }
++ /* no VFs requested, do nothing */
++ if (!adapter->vfs_allocated_count)
++ return;
++
++ /* allocate vf data storage */
++ adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
++ sizeof(struct vf_data_storage),
++ GFP_KERNEL);
++
++ if (adapter->vf_data) {
++ if (!old_vfs) {
++ if (pci_enable_sriov(pdev,
++ adapter->vfs_allocated_count))
++ goto err_out;
++ dev_warn(pci_dev_to_dev(pdev),
++ "SR-IOV has been enabled: configure port VLANs to keep your VFs secure\n");
++ }
++ for (i = 0; i < adapter->vfs_allocated_count; i++)
++ igb_vf_configure(adapter, i);
++
++ switch (adapter->hw.mac.type) {
++ case e1000_82576:
++ case e1000_i350:
++ /* Enable VM to VM loopback by default */
++ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE;
++ break;
++ default:
++ /* Currently no other hardware supports loopback */
++ break;
++ }
++
++ /* DMA Coalescing is not supported in IOV mode. */
++ if (adapter->hw.mac.type >= e1000_i350)
++ adapter->dmac = IGB_DMAC_DISABLE;
++ if (adapter->hw.mac.type < e1000_i350)
++ adapter->flags |= IGB_FLAG_DETECT_BAD_DMA;
++ return;
++
++ }
++
++err_out:
++ kfree(adapter->vf_data);
++ adapter->vf_data = NULL;
++ adapter->vfs_allocated_count = 0;
++ dev_warn(pci_dev_to_dev(pdev),
++ "Failed to initialize SR-IOV virtualization\n");
++}
++
++/**
++ * igb_set_interrupt_capability - set MSI or MSI-X if supported
++ *
++ * Attempt to configure interrupts using the best available
++ * capabilities of the hardware and kernel.
++ **/
++static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
++{
++ struct pci_dev *pdev = adapter->pdev;
++ int err;
++ int numvecs, i;
++
++ if (!msix)
++ adapter->int_mode = IGB_INT_MODE_MSI;
++
++ /* Number of supported queues. */
++ adapter->num_rx_queues = adapter->rss_queues;
++
++ if (adapter->vmdq_pools > 1)
++ adapter->num_rx_queues += adapter->vmdq_pools - 1;
++
++#ifdef HAVE_TX_MQ
++ if (adapter->vmdq_pools)
++ adapter->num_tx_queues = adapter->vmdq_pools;
++ else
++ adapter->num_tx_queues = adapter->num_rx_queues;
++#else
++ adapter->num_tx_queues = max_t(u32, 1, adapter->vmdq_pools);
++#endif
++
++ switch (adapter->int_mode) {
++ case IGB_INT_MODE_MSIX:
++ /* start with one vector for every Tx/Rx queue */
++ numvecs = max_t(int, adapter->num_tx_queues,
++ adapter->num_rx_queues);
++
++ /* if tx handler is separate make it 1 for every queue */
++ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
++ numvecs = adapter->num_tx_queues +
++ adapter->num_rx_queues;
++
++ /* store the number of vectors reserved for queues */
++ adapter->num_q_vectors = numvecs;
++
++ /* add 1 vector for link status interrupts */
++ numvecs++;
++ adapter->msix_entries = kcalloc(numvecs,
++ sizeof(struct msix_entry),
++ GFP_KERNEL);
++ if (adapter->msix_entries) {
++ for (i = 0; i < numvecs; i++)
++ adapter->msix_entries[i].entry = i;
++
++ err = pci_enable_msix(pdev,
++ adapter->msix_entries, numvecs);
++ if (err == 0)
++ break;
++ }
++ /* MSI-X failed, so fall through and try MSI */
++ dev_warn(pci_dev_to_dev(pdev),
++ "Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
++ igb_reset_interrupt_capability(adapter);
++ case IGB_INT_MODE_MSI:
++ if (!pci_enable_msi(pdev))
++ adapter->flags |= IGB_FLAG_HAS_MSI;
++ else
++ dev_warn(pci_dev_to_dev(pdev),
++ "Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
++ /* Fall through */
++ case IGB_INT_MODE_LEGACY:
++ /* disable advanced features and set number of queues to 1 */
++ igb_reset_sriov_capability(adapter);
++ adapter->vmdq_pools = 0;
++ adapter->rss_queues = 1;
++ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
++ adapter->num_rx_queues = 1;
++ adapter->num_tx_queues = 1;
++ adapter->num_q_vectors = 1;
++ /* Don't do anything; this is system default */
++ break;
++ }
++}
++
++static void igb_add_ring(struct igb_ring *ring,
++ struct igb_ring_container *head)
++{
++ head->ring = ring;
++ head->count++;
++}
++
++/**
++ * igb_alloc_q_vector - Allocate memory for a single interrupt vector
++ * @adapter: board private structure to initialize
++ * @v_count: q_vectors allocated on adapter, used for ring interleaving
++ * @v_idx: index of vector in adapter struct
++ * @txr_count: total number of Tx rings to allocate
++ * @txr_idx: index of first Tx ring to allocate
++ * @rxr_count: total number of Rx rings to allocate
++ * @rxr_idx: index of first Rx ring to allocate
++ *
++ * We allocate one q_vector. If allocation fails we return -ENOMEM.
++ **/
++static int igb_alloc_q_vector(struct igb_adapter *adapter,
++ unsigned int v_count, unsigned int v_idx,
++ unsigned int txr_count, unsigned int txr_idx,
++ unsigned int rxr_count, unsigned int rxr_idx)
++{
++ struct igb_q_vector *q_vector;
++ struct igb_ring *ring;
+ int ring_count, size;
+
+ /* igb only supports 1 Tx and/or 1 Rx queue per vector */
+@@ -1206,17 +1116,18 @@
+
+ /* allocate q_vector and rings */
+ q_vector = adapter->q_vector[v_idx];
+- if (!q_vector) {
+- q_vector = kzalloc(size, GFP_KERNEL);
+- } else if (size > ksize(q_vector)) {
+- kfree_rcu(q_vector, rcu);
++ if (!q_vector)
+ q_vector = kzalloc(size, GFP_KERNEL);
+- } else {
++ else
+ memset(q_vector, 0, size);
+- }
+ if (!q_vector)
+ return -ENOMEM;
+
++#ifndef IGB_NO_LRO
++ /* initialize LRO */
++ __skb_queue_head_init(&q_vector->lrolist.active);
++
++#endif
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ igb_poll, 64);
+@@ -1229,7 +1140,7 @@
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize ITR configuration */
+- q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
++ q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
+ q_vector->itr_val = IGB_START_ITR;
+
+ /* initialize pointer to rings */
+@@ -1265,9 +1176,6 @@
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
+
+- u64_stats_init(&ring->tx_syncp);
+- u64_stats_init(&ring->tx_syncp2);
+-
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+@@ -1286,22 +1194,23 @@
+ /* update q_vector Rx values */
+ igb_add_ring(ring, &q_vector->rx);
+
++#if defined(HAVE_RHEL6_NET_DEVICE_OPS_EXT) || !defined(HAVE_NDO_SET_FEATURES)
++ /* enable rx checksum */
++ set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags);
++
++#endif
+ /* set flag indicating ring supports SCTP checksum offload */
+ if (adapter->hw.mac.type >= e1000_82576)
+ set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
+
+- /* On i350, i354, i210, and i211, loopback VLAN packets
+- * have the tag byte-swapped.
+- */
+- if (adapter->hw.mac.type >= e1000_i350)
++ if ((adapter->hw.mac.type == e1000_i350) ||
++ (adapter->hw.mac.type == e1000_i354))
+ set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
+
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+- u64_stats_init(&ring->rx_syncp);
+-
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+ }
+@@ -1309,13 +1218,12 @@
+ return 0;
+ }
+
+-
+ /**
+- * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+- * @adapter: board private structure to initialize
++ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
++ * @adapter: board private structure to initialize
+ *
+- * We allocate one q_vector per queue interrupt. If allocation fails we
+- * return -ENOMEM.
++ * We allocate one q_vector per queue interrupt. If allocation fails we
++ * return -ENOMEM.
+ **/
+ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+ {
+@@ -1370,11 +1278,9 @@
+ }
+
+ /**
+- * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+- * @adapter: board private structure to initialize
+- * @msix: boolean value of MSIX capability
++ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ *
+- * This function initializes the interrupts and allocates all of the queues.
++ * This function initializes the interrupts and allocates all of the queues.
+ **/
+ static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
+ {
+@@ -1385,7 +1291,7 @@
+
+ err = igb_alloc_q_vectors(adapter);
+ if (err) {
+- dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
++ dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+@@ -1399,11 +1305,10 @@
+ }
+
+ /**
+- * igb_request_irq - initialize interrupts
+- * @adapter: board private structure to initialize
++ * igb_request_irq - initialize interrupts
+ *
+- * Attempts to configure interrupts using the best available
+- * capabilities of the hardware and kernel.
++ * Attempts to configure interrupts using the best available
++ * capabilities of the hardware and kernel.
+ **/
+ static int igb_request_irq(struct igb_adapter *adapter)
+ {
+@@ -1411,7 +1316,7 @@
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+- if (adapter->flags & IGB_FLAG_HAS_MSIX) {
++ if (adapter->msix_entries) {
+ err = igb_request_msix(adapter);
+ if (!err)
+ goto request_done;
+@@ -1420,10 +1325,10 @@
+ igb_free_all_rx_resources(adapter);
+
+ igb_clear_interrupt_scheme(adapter);
++ igb_reset_sriov_capability(adapter);
+ err = igb_init_interrupt_scheme(adapter, false);
+ if (err)
+ goto request_done;
+-
+ igb_setup_all_tx_resources(adapter);
+ igb_setup_all_rx_resources(adapter);
+ igb_configure(adapter);
+@@ -1432,7 +1337,7 @@
+ igb_assign_vector(adapter->q_vector[0], 0);
+
+ if (adapter->flags & IGB_FLAG_HAS_MSI) {
+- err = request_irq(pdev->irq, igb_intr_msi, 0,
++ err = request_irq(pdev->irq, &igb_intr_msi, 0,
+ netdev->name, adapter);
+ if (!err)
+ goto request_done;
+@@ -1442,11 +1347,11 @@
+ adapter->flags &= ~IGB_FLAG_HAS_MSI;
+ }
+
+- err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
++ err = request_irq(pdev->irq, &igb_intr, IRQF_SHARED,
+ netdev->name, adapter);
+
+ if (err)
+- dev_err(&pdev->dev, "Error %d getting interrupt\n",
++ dev_err(pci_dev_to_dev(pdev), "Error %d getting interrupt\n",
+ err);
+
+ request_done:
+@@ -1455,7 +1360,7 @@
+
+ static void igb_free_irq(struct igb_adapter *adapter)
+ {
+- if (adapter->flags & IGB_FLAG_HAS_MSIX) {
++ if (adapter->msix_entries) {
+ int vector = 0, i;
+
+ free_irq(adapter->msix_entries[vector++].vector, adapter);
+@@ -1469,64 +1374,76 @@
+ }
+
+ /**
+- * igb_irq_disable - Mask off interrupt generation on the NIC
+- * @adapter: board private structure
++ * igb_irq_disable - Mask off interrupt generation on the NIC
++ * @adapter: board private structure
+ **/
+ static void igb_irq_disable(struct igb_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+
+- /* we need to be careful when disabling interrupts. The VFs are also
++ /*
++ * we need to be careful when disabling interrupts. The VFs are also
+ * mapped into these registers and so clearing the bits can cause
+ * issues on the VF drivers so we only need to clear what we set
+ */
+- if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+- u32 regval = rd32(E1000_EIAM);
++ if (adapter->msix_entries) {
++ u32 regval = E1000_READ_REG(hw, E1000_EIAM);
+
+- wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
+- wr32(E1000_EIMC, adapter->eims_enable_mask);
+- regval = rd32(E1000_EIAC);
+- wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
+- }
++ E1000_WRITE_REG(hw, E1000_EIAM, regval
++ & ~adapter->eims_enable_mask);
++ E1000_WRITE_REG(hw, E1000_EIMC, adapter->eims_enable_mask);
++ regval = E1000_READ_REG(hw, E1000_EIAC);
++ E1000_WRITE_REG(hw, E1000_EIAC, regval
++ & ~adapter->eims_enable_mask);
++ }
++
++ E1000_WRITE_REG(hw, E1000_IAM, 0);
++ E1000_WRITE_REG(hw, E1000_IMC, ~0);
++ E1000_WRITE_FLUSH(hw);
+
+- wr32(E1000_IAM, 0);
+- wr32(E1000_IMC, ~0);
+- wrfl();
+- if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+- int i;
++ if (adapter->msix_entries) {
++ int vector = 0, i;
++
++ synchronize_irq(adapter->msix_entries[vector++].vector);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+- synchronize_irq(adapter->msix_entries[i].vector);
++ synchronize_irq(adapter->msix_entries[vector++].vector);
+ } else {
+ synchronize_irq(adapter->pdev->irq);
+ }
+ }
+
+ /**
+- * igb_irq_enable - Enable default interrupt generation settings
+- * @adapter: board private structure
++ * igb_irq_enable - Enable default interrupt generation settings
++ * @adapter: board private structure
+ **/
+ static void igb_irq_enable(struct igb_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+
+- if (adapter->flags & IGB_FLAG_HAS_MSIX) {
++ if (adapter->msix_entries) {
+ u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
+- u32 regval = rd32(E1000_EIAC);
++ u32 regval = E1000_READ_REG(hw, E1000_EIAC);
+
+- wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
+- regval = rd32(E1000_EIAM);
+- wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
+- wr32(E1000_EIMS, adapter->eims_enable_mask);
++ E1000_WRITE_REG(hw, E1000_EIAC, regval
++ | adapter->eims_enable_mask);
++ regval = E1000_READ_REG(hw, E1000_EIAM);
++ E1000_WRITE_REG(hw, E1000_EIAM, regval
++ | adapter->eims_enable_mask);
++ E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_enable_mask);
+ if (adapter->vfs_allocated_count) {
+- wr32(E1000_MBVFIMR, 0xFF);
++ E1000_WRITE_REG(hw, E1000_MBVFIMR, 0xFF);
+ ims |= E1000_IMS_VMMB;
++ if (adapter->mdd)
++ if ((adapter->hw.mac.type == e1000_i350) ||
++ (adapter->hw.mac.type == e1000_i354))
++ ims |= E1000_IMS_MDDET;
+ }
+- wr32(E1000_IMS, ims);
++ E1000_WRITE_REG(hw, E1000_IMS, ims);
+ } else {
+- wr32(E1000_IMS, IMS_ENABLE_MASK |
++ E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK |
+ E1000_IMS_DRSTA);
+- wr32(E1000_IAM, IMS_ENABLE_MASK |
++ E1000_WRITE_REG(hw, E1000_IAM, IMS_ENABLE_MASK |
+ E1000_IMS_DRSTA);
+ }
+ }
+@@ -1539,7 +1456,7 @@
+
+ if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+ /* add VID to filter table */
+- igb_vfta_set(hw, vid, true);
++ igb_vfta_set(adapter, vid, TRUE);
+ adapter->mng_vlan_id = vid;
+ } else {
+ adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
+@@ -1547,19 +1464,24 @@
+
+ if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
+ (vid != old_vid) &&
++#ifdef HAVE_VLAN_RX_REGISTER
++ !vlan_group_get_device(adapter->vlgrp, old_vid)) {
++#else
+ !test_bit(old_vid, adapter->active_vlans)) {
++#endif
+ /* remove VID from filter table */
+- igb_vfta_set(hw, old_vid, false);
++ igb_vfta_set(adapter, old_vid, FALSE);
+ }
+ }
+
+ /**
+- * igb_release_hw_control - release control of the h/w to f/w
+- * @adapter: address of board private structure
++ * igb_release_hw_control - release control of the h/w to f/w
++ * @adapter: address of board private structure
++ *
++ * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
++ * For ASF and Pass Through versions of f/w this means that the
++ * driver is no longer loaded.
+ *
+- * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
+- * For ASF and Pass Through versions of f/w this means that the
+- * driver is no longer loaded.
+ **/
+ static void igb_release_hw_control(struct igb_adapter *adapter)
+ {
+@@ -1567,18 +1489,19 @@
+ u32 ctrl_ext;
+
+ /* Let firmware take over control of h/w */
+- ctrl_ext = rd32(E1000_CTRL_EXT);
+- wr32(E1000_CTRL_EXT,
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+ }
+
+ /**
+- * igb_get_hw_control - get control of the h/w from f/w
+- * @adapter: address of board private structure
++ * igb_get_hw_control - get control of the h/w from f/w
++ * @adapter: address of board private structure
++ *
++ * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
++ * For ASF and Pass Through versions of f/w this means that
++ * the driver is loaded.
+ *
+- * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
+- * For ASF and Pass Through versions of f/w this means that
+- * the driver is loaded.
+ **/
+ static void igb_get_hw_control(struct igb_adapter *adapter)
+ {
+@@ -1586,14 +1509,14 @@
+ u32 ctrl_ext;
+
+ /* Let firmware know the driver has taken over */
+- ctrl_ext = rd32(E1000_CTRL_EXT);
+- wr32(E1000_CTRL_EXT,
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ }
+
+ /**
+- * igb_configure - configure the hardware for RX and TX
+- * @adapter: private board structure
++ * igb_configure - configure the hardware for RX and TX
++ * @adapter: private board structure
+ **/
+ static void igb_configure(struct igb_adapter *adapter)
+ {
+@@ -1612,7 +1535,13 @@
+ igb_configure_tx(adapter);
+ igb_configure_rx(adapter);
+
+- igb_rx_fifo_flush_82575(&adapter->hw);
++ e1000_rx_fifo_flush_82575(&adapter->hw);
++#ifdef CONFIG_NETDEVICES_MULTIQUEUE
++ if (adapter->num_tx_queues > 1)
++ netdev->features |= NETIF_F_MULTI_QUEUE;
++ else
++ netdev->features &= ~NETIF_F_MULTI_QUEUE;
++#endif
+
+ /* call igb_desc_unused which always leaves
+ * at least 1 descriptor unused to make sure
+@@ -1625,45 +1554,42 @@
+ }
+
+ /**
+- * igb_power_up_link - Power up the phy/serdes link
+- * @adapter: address of board private structure
++ * igb_power_up_link - Power up the phy/serdes link
++ * @adapter: address of board private structure
+ **/
+ void igb_power_up_link(struct igb_adapter *adapter)
+ {
+- igb_reset_phy(&adapter->hw);
++ igb_e1000_phy_hw_reset(&adapter->hw);
+
+ if (adapter->hw.phy.media_type == e1000_media_type_copper)
+- igb_power_up_phy_copper(&adapter->hw);
++ igb_e1000_power_up_phy(&adapter->hw);
+ else
+- igb_power_up_serdes_link_82575(&adapter->hw);
+-
+- igb_setup_link(&adapter->hw);
++ e1000_power_up_fiber_serdes_link(&adapter->hw);
+ }
+
+ /**
+- * igb_power_down_link - Power down the phy/serdes link
+- * @adapter: address of board private structure
++ * igb_power_down_link - Power down the phy/serdes link
++ * @adapter: address of board private structure
+ */
+ static void igb_power_down_link(struct igb_adapter *adapter)
+ {
+ if (adapter->hw.phy.media_type == e1000_media_type_copper)
+- igb_power_down_phy_copper_82575(&adapter->hw);
++ e1000_power_down_phy(&adapter->hw);
+ else
+- igb_shutdown_serdes_link_82575(&adapter->hw);
++ e1000_shutdown_fiber_serdes_link(&adapter->hw);
+ }
+
+-/**
+- * Detect and switch function for Media Auto Sense
+- * @adapter: address of the board private structure
+- **/
++/* Detect and switch function for Media Auto Sense */
+ static void igb_check_swap_media(struct igb_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext, connsw;
+ bool swap_now = false;
++ bool link;
+
+- ctrl_ext = rd32(E1000_CTRL_EXT);
+- connsw = rd32(E1000_CONNSW);
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ connsw = E1000_READ_REG(hw, E1000_CONNSW);
++ link = igb_has_link(adapter);
+
+ /* need to live swap if current media is copper and we have fiber/serdes
+ * to go to.
+@@ -1674,10 +1600,10 @@
+ swap_now = true;
+ } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+ /* copper signal takes time to appear */
+- if (adapter->copper_tries < 4) {
++ if (adapter->copper_tries < 3) {
+ adapter->copper_tries++;
+ connsw |= E1000_CONNSW_AUTOSENSE_CONF;
+- wr32(E1000_CONNSW, connsw);
++ E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
+ return;
+ } else {
+ adapter->copper_tries = 0;
+@@ -1685,143 +1611,263 @@
+ (!(connsw & E1000_CONNSW_PHY_PDN))) {
+ swap_now = true;
+ connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
+- wr32(E1000_CONNSW, connsw);
++ E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
+ }
+ }
+ }
+
+- if (!swap_now)
+- return;
+-
+- switch (hw->phy.media_type) {
+- case e1000_media_type_copper:
+- netdev_info(adapter->netdev,
+- "MAS: changing media to fiber/serdes\n");
+- ctrl_ext |=
+- E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+- adapter->flags |= IGB_FLAG_MEDIA_RESET;
+- adapter->copper_tries = 0;
+- break;
+- case e1000_media_type_internal_serdes:
+- case e1000_media_type_fiber:
+- netdev_info(adapter->netdev,
+- "MAS: changing media to copper\n");
+- ctrl_ext &=
+- ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+- adapter->flags |= IGB_FLAG_MEDIA_RESET;
+- break;
+- default:
+- /* shouldn't get here during regular operation */
+- netdev_err(adapter->netdev,
+- "AMS: Invalid media type found, returning\n");
+- break;
++ if (swap_now) {
++ switch (hw->phy.media_type) {
++ case e1000_media_type_copper:
++ dev_info(pci_dev_to_dev(adapter->pdev),
++ "%s:MAS: changing media to fiber/serdes\n",
++ adapter->netdev->name);
++ ctrl_ext |=
++ E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
++ adapter->flags |= IGB_FLAG_MEDIA_RESET;
++ adapter->copper_tries = 0;
++ break;
++ case e1000_media_type_internal_serdes:
++ case e1000_media_type_fiber:
++ dev_info(pci_dev_to_dev(adapter->pdev),
++ "%s:MAS: changing media to copper\n",
++ adapter->netdev->name);
++ ctrl_ext &=
++ ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
++ adapter->flags |= IGB_FLAG_MEDIA_RESET;
++ break;
++ default:
++ /* shouldn't get here during regular operation */
++ dev_err(pci_dev_to_dev(adapter->pdev),
++ "%s:AMS: Invalid media type found, returning\n",
++ adapter->netdev->name);
++ break;
++ }
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ }
+- wr32(E1000_CTRL_EXT, ctrl_ext);
+ }
+
+-/**
+- * igb_up - Open the interface and prepare it to handle traffic
+- * @adapter: board private structure
+- **/
+-int igb_up(struct igb_adapter *adapter)
++#ifdef HAVE_I2C_SUPPORT
++/* igb_get_i2c_data - Reads the I2C SDA data bit
++ * @hw: pointer to hardware structure
++ * @i2cctl: Current value of I2CCTL register
++ *
++ * Returns the I2C data bit value
++ */
++static int igb_get_i2c_data(void *data)
+ {
++ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+- int i;
+-
+- /* hardware has been reset, we need to reload some things */
+- igb_configure(adapter);
++ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+- clear_bit(__IGB_DOWN, &adapter->state);
++ return !!(i2cctl & E1000_I2C_DATA_IN);
++}
+
+- for (i = 0; i < adapter->num_q_vectors; i++)
+- napi_enable(&(adapter->q_vector[i]->napi));
++/* igb_set_i2c_data - Sets the I2C data bit
++ * @data: pointer to hardware structure
++ * @state: I2C data value (0 or 1) to set
++ *
++ * Sets the I2C data bit
++ */
++static void igb_set_i2c_data(void *data, int state)
++{
++ struct igb_adapter *adapter = (struct igb_adapter *)data;
++ struct e1000_hw *hw = &adapter->hw;
++ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+- if (adapter->flags & IGB_FLAG_HAS_MSIX)
+- igb_configure_msix(adapter);
++ if (state)
++ i2cctl |= E1000_I2C_DATA_OUT;
+ else
+- igb_assign_vector(adapter->q_vector[0], 0);
+-
+- /* Clear any pending interrupts. */
+- rd32(E1000_ICR);
+- igb_irq_enable(adapter);
+-
+- /* notify VFs that reset has been completed */
+- if (adapter->vfs_allocated_count) {
+- u32 reg_data = rd32(E1000_CTRL_EXT);
++ i2cctl &= ~E1000_I2C_DATA_OUT;
+
+- reg_data |= E1000_CTRL_EXT_PFRSTD;
+- wr32(E1000_CTRL_EXT, reg_data);
+- }
++ i2cctl &= ~E1000_I2C_DATA_OE_N;
++ i2cctl |= E1000_I2C_CLK_OE_N;
+
+- netif_tx_start_all_queues(adapter->netdev);
++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
++ E1000_WRITE_FLUSH(hw);
+
+- /* start the watchdog. */
+- hw->mac.get_link_status = 1;
+- schedule_work(&adapter->watchdog_task);
++}
+
+- if ((adapter->flags & IGB_FLAG_EEE) &&
+- (!hw->dev_spec._82575.eee_disable))
+- adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
++/* igb_set_i2c_clk - Sets the I2C SCL clock
++ * @data: pointer to hardware structure
++ * @state: state to set clock
++ *
++ * Sets the I2C clock line to state
++ */
++static void igb_set_i2c_clk(void *data, int state)
++{
++ struct igb_adapter *adapter = (struct igb_adapter *)data;
++ struct e1000_hw *hw = &adapter->hw;
++ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+- return 0;
++ if (state) {
++ i2cctl |= E1000_I2C_CLK_OUT;
++ i2cctl &= ~E1000_I2C_CLK_OE_N;
++ } else {
++ i2cctl &= ~E1000_I2C_CLK_OUT;
++ i2cctl &= ~E1000_I2C_CLK_OE_N;
++ }
++ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
++ E1000_WRITE_FLUSH(hw);
+ }
+
+-void igb_down(struct igb_adapter *adapter)
++/* igb_get_i2c_clk - Gets the I2C SCL clock state
++ * @data: pointer to hardware structure
++ *
++ * Gets the I2C clock state
++ */
++static int igb_get_i2c_clk(void *data)
+ {
+- struct net_device *netdev = adapter->netdev;
++ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+- u32 tctl, rctl;
+- int i;
++ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+- /* signal that we're down so the interrupt handler does not
+- * reschedule our watchdog timer
+- */
+- set_bit(__IGB_DOWN, &adapter->state);
++ return !!(i2cctl & E1000_I2C_CLK_IN);
++}
++
++static const struct i2c_algo_bit_data igb_i2c_algo = {
++ .setsda = igb_set_i2c_data,
++ .setscl = igb_set_i2c_clk,
++ .getsda = igb_get_i2c_data,
++ .getscl = igb_get_i2c_clk,
++ .udelay = 5,
++ .timeout = 20,
++};
++
++/* igb_init_i2c - Init I2C interface
++ * @adapter: pointer to adapter structure
++ *
++ */
++static s32 igb_init_i2c(struct igb_adapter *adapter)
++{
++ s32 status = E1000_SUCCESS;
++
++ /* I2C interface supported on i350 devices */
++ if (adapter->hw.mac.type != e1000_i350)
++ return E1000_SUCCESS;
++
++ /* Initialize the i2c bus which is controlled by the registers.
++ * This bus will use the i2c_algo_bit structue that implements
++ * the protocol through toggling of the 4 bits in the register.
++ */
++ adapter->i2c_adap.owner = THIS_MODULE;
++ adapter->i2c_algo = igb_i2c_algo;
++ adapter->i2c_algo.data = adapter;
++ adapter->i2c_adap.algo_data = &adapter->i2c_algo;
++ adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
++ strlcpy(adapter->i2c_adap.name, "igb BB",
++ sizeof(adapter->i2c_adap.name));
++ status = i2c_bit_add_bus(&adapter->i2c_adap);
++ return status;
++}
++
++#endif /* HAVE_I2C_SUPPORT */
++/**
++ * igb_up - Open the interface and prepare it to handle traffic
++ * @adapter: board private structure
++ **/
++int igb_up(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ int i;
++
++ /* hardware has been reset, we need to reload some things */
++ igb_configure(adapter);
++
++ clear_bit(__IGB_DOWN, &adapter->state);
++
++ for (i = 0; i < adapter->num_q_vectors; i++)
++ napi_enable(&(adapter->q_vector[i]->napi));
++
++ if (adapter->msix_entries)
++ igb_configure_msix(adapter);
++ else
++ igb_assign_vector(adapter->q_vector[0], 0);
++
++ igb_configure_lli(adapter);
++
++ /* Clear any pending interrupts. */
++ E1000_READ_REG(hw, E1000_ICR);
++ igb_irq_enable(adapter);
++
++ /* notify VFs that reset has been completed */
++ if (adapter->vfs_allocated_count) {
++ u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT);
++
++ reg_data |= E1000_CTRL_EXT_PFRSTD;
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data);
++ }
++
++ netif_tx_start_all_queues(adapter->netdev);
++
++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
++ schedule_work(&adapter->dma_err_task);
++ /* start the watchdog. */
++ hw->mac.get_link_status = 1;
++ schedule_work(&adapter->watchdog_task);
++
++ if ((adapter->flags & IGB_FLAG_EEE) &&
++ (!hw->dev_spec._82575.eee_disable))
++ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
++
++ return 0;
++}
++
++void igb_down(struct igb_adapter *adapter)
++{
++ struct net_device *netdev = adapter->netdev;
++ struct e1000_hw *hw = &adapter->hw;
++ u32 tctl, rctl;
++ int i;
++
++ /* signal that we're down so the interrupt handler does not
++ * reschedule our watchdog timer
++ */
++ set_bit(__IGB_DOWN, &adapter->state);
+
+ /* disable receives in the hardware */
+- rctl = rd32(E1000_RCTL);
+- wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+ /* flush and sleep below */
+
++ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ /* disable transmits in the hardware */
+- tctl = rd32(E1000_TCTL);
++ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ tctl &= ~E1000_TCTL_EN;
+- wr32(E1000_TCTL, tctl);
++ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ /* flush both disables and wait for them to finish */
+- wrfl();
+- usleep_range(10000, 11000);
++ E1000_WRITE_FLUSH(hw);
++ usleep_range(10000, 20000);
++
++ for (i = 0; i < adapter->num_q_vectors; i++)
++ napi_disable(&(adapter->q_vector[i]->napi));
+
+ igb_irq_disable(adapter);
+
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+
+- for (i = 0; i < adapter->num_q_vectors; i++) {
+- napi_synchronize(&(adapter->q_vector[i]->napi));
+- napi_disable(&(adapter->q_vector[i]->napi));
+- }
+-
+-
+ del_timer_sync(&adapter->watchdog_timer);
++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
++ del_timer_sync(&adapter->dma_err_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+- netif_carrier_off(netdev);
+-
+ /* record the stats before reset*/
+- spin_lock(&adapter->stats64_lock);
+- igb_update_stats(adapter, &adapter->stats64);
+- spin_unlock(&adapter->stats64_lock);
++ igb_update_stats(adapter);
+
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
++#ifdef HAVE_PCI_ERS
+ if (!pci_channel_offline(adapter->pdev))
+ igb_reset(adapter);
++#else
++ igb_reset(adapter);
++#endif
+ igb_clean_all_tx_rings(adapter);
+ igb_clean_all_rx_rings(adapter);
+-#ifdef CONFIG_IGB_DCA
+-
++#ifdef IGB_DCA
+ /* since we reset the hardware DCA settings were cleared */
+ igb_setup_dca(adapter);
+ #endif
+@@ -1837,35 +1883,26 @@
+ clear_bit(__IGB_RESETTING, &adapter->state);
+ }
+
+-/** igb_enable_mas - Media Autosense re-enable after swap
++/**
++ * igb_enable_mas - Media Autosense re-enable after swap
+ *
+ * @adapter: adapter struct
+ **/
+-static s32 igb_enable_mas(struct igb_adapter *adapter)
++void igb_enable_mas(struct igb_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 connsw;
+- s32 ret_val = 0;
+
+- connsw = rd32(E1000_CONNSW);
+- if (!(hw->phy.media_type == e1000_media_type_copper))
+- return ret_val;
++ connsw = E1000_READ_REG(hw, E1000_CONNSW);
+
+ /* configure for SerDes media detect */
+- if (!(connsw & E1000_CONNSW_SERDESD)) {
++ if ((hw->phy.media_type == e1000_media_type_copper) &&
++ (!(connsw & E1000_CONNSW_SERDESD))) {
+ connsw |= E1000_CONNSW_ENRGSRC;
+ connsw |= E1000_CONNSW_AUTOSENSE_EN;
+- wr32(E1000_CONNSW, connsw);
+- wrfl();
+- } else if (connsw & E1000_CONNSW_SERDESD) {
+- /* already SerDes, no need to enable anything */
+- return ret_val;
+- } else {
+- netdev_info(adapter->netdev,
+- "MAS: Unable to configure feature, disabling..\n");
+- adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
++ E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
++ E1000_WRITE_FLUSH(hw);
+ }
+- return ret_val;
+ }
+
+ void igb_reset(struct igb_adapter *adapter)
+@@ -1881,13 +1918,13 @@
+ */
+ switch (mac->type) {
+ case e1000_i350:
+- case e1000_i354:
+ case e1000_82580:
+- pba = rd32(E1000_RXPBS);
+- pba = igb_rxpbs_adjust_82580(pba);
++ case e1000_i354:
++ pba = E1000_READ_REG(hw, E1000_RXPBS);
++ pba = e1000_rxpbs_adjust_82580(pba);
+ break;
+ case e1000_82576:
+- pba = rd32(E1000_RXPBS);
++ pba = E1000_READ_REG(hw, E1000_RXPBS);
+ pba &= E1000_RXPBS_SIZE_MASK_82576;
+ break;
+ case e1000_82575:
+@@ -1901,7 +1938,7 @@
+ if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+ (mac->type < e1000_82576)) {
+ /* adjust PBA for jumbo frames */
+- wr32(E1000_PBA, pba);
++ E1000_WRITE_REG(hw, E1000_PBA, pba);
+
+ /* To maintain wire speed transmits, the Tx FIFO should be
+ * large enough to accommodate two full transmit packets,
+@@ -1910,12 +1947,12 @@
+ * one full receive packet and is similarly rounded up and
+ * expressed in KB.
+ */
+- pba = rd32(E1000_PBA);
++ pba = E1000_READ_REG(hw, E1000_PBA);
+ /* upper 16 bits has Tx packet buffer allocation size in KB */
+ tx_space = pba >> 16;
+ /* lower 16 bits has Rx packet buffer allocation size in KB */
+ pba &= 0xffff;
+- /* the Tx fifo also stores 16 bytes of information about the Tx
++ /* the tx fifo also stores 16 bytes of information about the tx
+ * but don't include ethernet FCS because hardware appends it
+ */
+ min_tx_space = (adapter->max_frame_size +
+@@ -1936,13 +1973,13 @@
+ ((min_tx_space - tx_space) < pba)) {
+ pba = pba - (min_tx_space - tx_space);
+
+- /* if short on Rx space, Rx wins and must trump Tx
++ /* if short on rx space, rx wins and must trump tx
+ * adjustment
+ */
+ if (pba < min_rx_space)
+ pba = min_rx_space;
+ }
+- wr32(E1000_PBA, pba);
++ E1000_WRITE_REG(hw, E1000_PBA, pba);
+ }
+
+ /* flow control settings */
+@@ -1965,6 +2002,10 @@
+ if (adapter->vfs_allocated_count) {
+ int i;
+
++ /*
++ * Clear all flags except indication that the PF has set
++ * the VF MAC addresses administratively
++ */
+ for (i = 0 ; i < adapter->vfs_allocated_count; i++)
+ adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
+
+@@ -1972,116 +2013,334 @@
+ igb_ping_all_vfs(adapter);
+
+ /* disable transmits and receives */
+- wr32(E1000_VFRE, 0);
+- wr32(E1000_VFTE, 0);
++ E1000_WRITE_REG(hw, E1000_VFRE, 0);
++ E1000_WRITE_REG(hw, E1000_VFTE, 0);
+ }
+
+ /* Allow time for pending master requests to run */
+- hw->mac.ops.reset_hw(hw);
+- wr32(E1000_WUC, 0);
++ igb_e1000_reset_hw(hw);
++ E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+- /* need to resetup here after media swap */
+- adapter->ei.get_invariants(hw);
++ e1000_setup_init_funcs(hw, TRUE);
++ igb_check_options(adapter);
++ igb_e1000_get_bus_info(hw);
+ adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
+ }
+- if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+- if (igb_enable_mas(adapter))
+- dev_err(&pdev->dev,
+- "Error enabling Media Auto Sense\n");
++ if ((mac->type == e1000_82575) &&
++ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
++ igb_enable_mas(adapter);
+ }
+- if (hw->mac.ops.init_hw(hw))
+- dev_err(&pdev->dev, "Hardware Error\n");
++ if (igb_e1000_init_hw(hw))
++ dev_err(pci_dev_to_dev(pdev), "Hardware Error\n");
+
+- /* Flow control settings reset on hardware reset, so guarantee flow
++ /*
++ * Flow control settings reset on hardware reset, so guarantee flow
+ * control is off when forcing speed.
+ */
+ if (!hw->mac.autoneg)
+- igb_force_mac_fc(hw);
++ igb_e1000_force_mac_fc(hw);
+
+ igb_init_dmac(adapter, pba);
+-#ifdef CONFIG_IGB_HWMON
+ /* Re-initialize the thermal sensor on i350 devices. */
+- if (!test_bit(__IGB_DOWN, &adapter->state)) {
+- if (mac->type == e1000_i350 && hw->bus.func == 0) {
+- /* If present, re-initialize the external thermal sensor
+- * interface.
+- */
+- if (adapter->ets)
+- mac->ops.init_thermal_sensor_thresh(hw);
+- }
++ if (mac->type == e1000_i350 && hw->bus.func == 0) {
++ /*
++ * If present, re-initialize the external thermal sensor
++ * interface.
++ */
++ if (adapter->ets)
++ e1000_set_i2c_bb(hw);
++ e1000_init_thermal_sensor_thresh(hw);
+ }
+-#endif
+- /* Re-establish EEE setting */
++
++ /*Re-establish EEE setting */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (mac->type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+- igb_set_eee_i350(hw);
++ e1000_set_eee_i350(hw, true, true);
+ break;
+ case e1000_i354:
+- igb_set_eee_i354(hw);
++ e1000_set_eee_i354(hw, true, true);
+ break;
+ default:
+ break;
+ }
+ }
++
+ if (!netif_running(adapter->netdev))
+ igb_power_down_link(adapter);
+
+ igb_update_mng_vlan(adapter);
+
+ /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+- wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
++ E1000_WRITE_REG(hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+
++#ifdef HAVE_PTP_1588_CLOCK
+ /* Re-enable PTP, where applicable. */
+ igb_ptp_reset(adapter);
++#endif /* HAVE_PTP_1588_CLOCK */
+
+- igb_get_phy_info(hw);
++ e1000_get_phy_info(hw);
++
++ adapter->devrc++;
+ }
+
++#ifdef HAVE_NDO_SET_FEATURES
++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
++static u32 igb_fix_features(struct net_device *netdev,
++ u32 features)
++#else
+ static netdev_features_t igb_fix_features(struct net_device *netdev,
+- netdev_features_t features)
++ netdev_features_t features)
++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+ {
+- /* Since there is no support for separate Rx/Tx vlan accel
+- * enable/disable make sure Tx flag is always in same state as Rx.
++ /*
++ * Since there is no support for separate tx vlan accel
++ * enabled make sure tx flag is cleared if rx is.
+ */
+- if (features & NETIF_F_HW_VLAN_CTAG_RX)
+- features |= NETIF_F_HW_VLAN_CTAG_TX;
+- else
++#ifdef NETIF_F_HW_VLAN_CTAG_RX
++ if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
++#else
++ if (!(features & NETIF_F_HW_VLAN_RX))
++ features &= ~NETIF_F_HW_VLAN_TX;
++#endif /* NETIF_F_HW_VLAN_CTAG_RX */
++
++#ifndef IGB_NO_LRO
++ /* If Rx checksum is disabled, then LRO should also be disabled */
++ if (!(features & NETIF_F_RXCSUM))
++ features &= ~NETIF_F_LRO;
+
++#endif
+ return features;
+ }
+
+ static int igb_set_features(struct net_device *netdev,
+- netdev_features_t features)
++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
++ u32 features)
++#else
++ netdev_features_t features)
++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+ {
+ netdev_features_t changed = netdev->features ^ features;
++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+ struct igb_adapter *adapter = netdev_priv(netdev);
++#endif
+
++#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
++#else
++ if (changed & NETIF_F_HW_VLAN_RX)
++#endif /* NETIF_F_HW_VLAN_CTAG_RX */
++ netdev->features = features;
++#ifdef HAVE_VLAN_RX_REGISTER
++ igb_vlan_mode(netdev, adapter->vlgrp);
++#else
+ igb_vlan_mode(netdev, features);
++#endif
+
+- if (!(changed & NETIF_F_RXALL))
++ if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
+ return 0;
+
+ netdev->features = features;
+
+- if (netif_running(netdev))
+- igb_reinit_locked(adapter);
+- else
+- igb_reset(adapter);
++ return 0;
++}
++#endif /* HAVE_NDO_SET_FEATURES */
++
++#ifdef HAVE_FDB_OPS
++#ifdef USE_CONST_DEV_UC_CHAR
++static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
++ struct net_device *dev,
++ const unsigned char *addr,
++#ifdef HAVE_NDO_FDB_ADD_VID
++ u16 vid,
++#endif /* HAVE_NDO_FDB_ADD_VID */
++ u16 flags)
++#else /* USE_CONST_DEV_UC_CHAR */
++static int igb_ndo_fdb_add(struct ndmsg *ndm,
++ struct net_device *dev,
++ unsigned char *addr,
++ u16 flags)
++#endif /* USE_CONST_DEV_UC_CHAR */
++{
++ struct igb_adapter *adapter = netdev_priv(dev);
++ struct e1000_hw *hw = &adapter->hw;
++ int err;
++
++ if (!(adapter->vfs_allocated_count))
++ return -EOPNOTSUPP;
++
++ /* Hardware does not support aging addresses so if a
++ * ndm_state is given only allow permanent addresses
++ */
++ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
++ pr_info("%s: FDB only supports static addresses\n",
++ igb_driver_name);
++ return -EINVAL;
++ }
++
++ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
++ u32 rar_uc_entries = hw->mac.rar_entry_count -
++ (adapter->vfs_allocated_count + 1);
++
++ if (netdev_uc_count(dev) < rar_uc_entries)
++ err = dev_uc_add_excl(dev, addr);
++ else
++ err = -ENOMEM;
++ } else if (is_multicast_ether_addr(addr)) {
++ err = dev_mc_add_excl(dev, addr);
++ } else {
++ err = -EINVAL;
++ }
++
++ /* Only return duplicate errors if NLM_F_EXCL is set */
++ if (err == -EEXIST && !(flags & NLM_F_EXCL))
++ err = 0;
++
++ return err;
++}
++
++#ifndef USE_DEFAULT_FDB_DEL_DUMP
++#ifdef USE_CONST_DEV_UC_CHAR
++static int igb_ndo_fdb_del(struct ndmsg *ndm,
++ struct net_device *dev,
++ const unsigned char *addr)
++#else
++static int igb_ndo_fdb_del(struct ndmsg *ndm,
++ struct net_device *dev,
++ unsigned char *addr)
++#endif /* USE_CONST_DEV_UC_CHAR */
++{
++ struct igb_adapter *adapter = netdev_priv(dev);
++ int err = -EOPNOTSUPP;
++
++ if (ndm->ndm_state & NUD_PERMANENT) {
++ pr_info("%s: FDB only supports static addresses\n",
++ igb_driver_name);
++ return -EINVAL;
++ }
++
++ if (adapter->vfs_allocated_count) {
++ if (is_unicast_ether_addr(addr))
++ err = dev_uc_del(dev, addr);
++ else if (is_multicast_ether_addr(addr))
++ err = dev_mc_del(dev, addr);
++ else
++ err = -EINVAL;
++ }
++
++ return err;
++}
++
++static int igb_ndo_fdb_dump(struct sk_buff *skb,
++ struct netlink_callback *cb,
++ struct net_device *dev,
++ int idx)
++{
++ struct igb_adapter *adapter = netdev_priv(dev);
++
++ if (adapter->vfs_allocated_count)
++ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
++
++ return idx;
++}
++#endif /* USE_DEFAULT_FDB_DEL_DUMP */
++#ifdef HAVE_BRIDGE_ATTRIBS
++#ifdef HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
++static int igb_ndo_bridge_setlink(struct net_device *dev,
++ struct nlmsghdr *nlh,
++ u16 flags)
++#else
++static int igb_ndo_bridge_setlink(struct net_device *dev,
++ struct nlmsghdr *nlh)
++#endif /* HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS */
++{
++ struct igb_adapter *adapter = netdev_priv(dev);
++ struct e1000_hw *hw = &adapter->hw;
++ struct nlattr *attr, *br_spec;
++ int rem;
++
++ if (!(adapter->vfs_allocated_count))
++ return -EOPNOTSUPP;
++
++ switch (adapter->hw.mac.type) {
++ case e1000_82576:
++ case e1000_i350:
++ case e1000_i354:
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++
++ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++
++ nla_for_each_nested(attr, br_spec, rem) {
++ __u16 mode;
++
++ if (nla_type(attr) != IFLA_BRIDGE_MODE)
++ continue;
++
++ mode = nla_get_u16(attr);
++ if (mode == BRIDGE_MODE_VEPA) {
++ e1000_vmdq_set_loopback_pf(hw, 0);
++ adapter->flags &= ~IGB_FLAG_LOOPBACK_ENABLE;
++ } else if (mode == BRIDGE_MODE_VEB) {
++ e1000_vmdq_set_loopback_pf(hw, 1);
++ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE;
++ } else
++ return -EINVAL;
++
++ netdev_info(adapter->netdev, "enabling bridge mode: %s\n",
++ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
++ }
+
+ return 0;
+ }
+
++#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
++static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
++ struct net_device *dev, u32 filter_mask,
++ int nlflags)
++#elif defined(HAVE_BRIDGE_FILTER)
++static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
++ struct net_device *dev, u32 filter_mask)
++#else
++static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
++ struct net_device *dev)
++#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */
++{
++ struct igb_adapter *adapter = netdev_priv(dev);
++ u16 mode;
++
++ if (!(adapter->vfs_allocated_count))
++ return -EOPNOTSUPP;
++
++ if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE)
++ mode = BRIDGE_MODE_VEB;
++ else
++ mode = BRIDGE_MODE_VEPA;
++#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags,
++ filter_mask, NULL);
++#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS)
++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags);
++#elif defined(NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS)
++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0);
++#else
++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
++#endif /* NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS */
++}
++#endif /* HAVE_BRIDGE_ATTRIBS */
++#endif /* HAVE_FDB_OPS */
++#ifdef HAVE_NET_DEVICE_OPS
+ static const struct net_device_ops igb_netdev_ops = {
+ .ndo_open = igb_open,
+ .ndo_stop = igb_close,
+ .ndo_start_xmit = igb_xmit_frame,
+- .ndo_get_stats64 = igb_get_stats64,
++ .ndo_get_stats = igb_get_stats,
+ .ndo_set_rx_mode = igb_set_rx_mode,
+ .ndo_set_mac_address = igb_set_mac,
+ .ndo_change_mtu = igb_change_mtu,
+@@ -2090,60 +2349,190 @@
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
++#ifdef IFLA_VF_MAX
+ .ndo_set_vf_mac = igb_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+ .ndo_set_vf_rate = igb_ndo_set_vf_bw,
+- .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
++#else
++ .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
++#endif /*HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+ .ndo_get_vf_config = igb_ndo_get_vf_config,
++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
++ .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
++#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
++#endif /* IFLA_VF_MAX */
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = igb_netpoll,
+ #endif
++#ifdef HAVE_VLAN_RX_REGISTER
++ .ndo_vlan_rx_register = igb_vlan_mode,
++#endif
++#ifdef HAVE_FDB_OPS
++ .ndo_fdb_add = igb_ndo_fdb_add,
++#ifndef USE_DEFAULT_FDB_DEL_DUMP
++ .ndo_fdb_del = igb_ndo_fdb_del,
++ .ndo_fdb_dump = igb_ndo_fdb_dump,
++#endif
++#ifdef HAVE_BRIDGE_ATTRIBS
++ .ndo_bridge_setlink = igb_ndo_bridge_setlink,
++ .ndo_bridge_getlink = igb_ndo_bridge_getlink,
++#endif /* HAVE_BRIDGE_ATTRIBS */
++#endif
++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
++};
++
++/* RHEL6 keeps these operations in a separate structure */
++static const struct net_device_ops_ext igb_netdev_ops_ext = {
++ .size = sizeof(struct net_device_ops_ext),
++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
++#ifdef HAVE_NDO_SET_FEATURES
+ .ndo_fix_features = igb_fix_features,
+ .ndo_set_features = igb_set_features,
++#endif /* HAVE_NDO_SET_FEATURES */
+ };
+
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++static const struct net_device_ops igb_vmdq_ops = {
++ .ndo_open = &igb_vmdq_open,
++ .ndo_stop = &igb_vmdq_close,
++ .ndo_start_xmit = &igb_vmdq_xmit_frame,
++ .ndo_get_stats = &igb_vmdq_get_stats,
++ .ndo_set_rx_mode = &igb_vmdq_set_rx_mode,
++ .ndo_validate_addr = eth_validate_addr,
++ .ndo_set_mac_address = &igb_vmdq_set_mac,
++ .ndo_change_mtu = &igb_vmdq_change_mtu,
++ .ndo_tx_timeout = &igb_vmdq_tx_timeout,
++ .ndo_vlan_rx_register = &igb_vmdq_vlan_rx_register,
++ .ndo_vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid,
++ .ndo_vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid,
++};
++
++#endif /* CONFIG_IGB_VMDQ_NETDEV */
++#endif /* HAVE_NET_DEVICE_OPS */
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++void igb_assign_vmdq_netdev_ops(struct net_device *vnetdev)
++{
++#ifdef HAVE_NET_DEVICE_OPS
++ vnetdev->netdev_ops = &igb_vmdq_ops;
++#else
++ dev->open = &igb_vmdq_open;
++ dev->stop = &igb_vmdq_close;
++ dev->hard_start_xmit = &igb_vmdq_xmit_frame;
++ dev->get_stats = &igb_vmdq_get_stats;
++#ifdef HAVE_SET_RX_MODE
++ dev->set_rx_mode = &igb_vmdq_set_rx_mode;
++#endif
++ dev->set_multicast_list = &igb_vmdq_set_rx_mode;
++ dev->set_mac_address = &igb_vmdq_set_mac;
++ dev->change_mtu = &igb_vmdq_change_mtu;
++#ifdef HAVE_TX_TIMEOUT
++ dev->tx_timeout = &igb_vmdq_tx_timeout;
++#endif
++#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX)
++ dev->vlan_rx_register = &igb_vmdq_vlan_rx_register;
++ dev->vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid;
++ dev->vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid;
++#endif
++#endif /* HAVE_NET_DEVICE_OPS */
++ igb_vmdq_set_ethtool_ops(vnetdev);
++ vnetdev->watchdog_timeo = 5 * HZ;
++
++}
++
++int igb_init_vmdq_netdevs(struct igb_adapter *adapter)
++{
++ int pool, err = 0, base_queue;
++ struct net_device *vnetdev;
++ struct igb_vmdq_adapter *vmdq_adapter;
++
++ for (pool = 1; pool < adapter->vmdq_pools; pool++) {
++ int qpp = (!adapter->rss_queues ? 1 : adapter->rss_queues);
++
++ base_queue = pool * qpp;
++ vnetdev = alloc_etherdev(sizeof(struct igb_vmdq_adapter));
++ if (!vnetdev) {
++ err = -ENOMEM;
++ break;
++ }
++
++ vmdq_adapter = netdev_priv(vnetdev);
++ vmdq_adapter->vnetdev = vnetdev;
++ vmdq_adapter->real_adapter = adapter;
++ vmdq_adapter->rx_ring = adapter->rx_ring[base_queue];
++ vmdq_adapter->tx_ring = adapter->tx_ring[base_queue];
++ igb_assign_vmdq_netdev_ops(vnetdev);
++ snprintf(vnetdev->name, IFNAMSIZ, "%sv%d",
++ adapter->netdev->name, pool);
++ vnetdev->features = adapter->netdev->features;
++#ifdef HAVE_NETDEV_VLAN_FEATURES
++ vnetdev->vlan_features = adapter->netdev->vlan_features;
++#endif /* HAVE_NETDEV_VLAN_FEATURES */
++ adapter->vmdq_netdev[pool-1] = vnetdev;
++ err = register_netdev(vnetdev);
++ if (err)
++ break;
++ }
++ return err;
++}
++
++int igb_remove_vmdq_netdevs(struct igb_adapter *adapter)
++{
++ int pool, err = 0;
++
++ for (pool = 1; pool < adapter->vmdq_pools; pool++) {
++ unregister_netdev(adapter->vmdq_netdev[pool-1]);
++ free_netdev(adapter->vmdq_netdev[pool-1]);
++ adapter->vmdq_netdev[pool-1] = NULL;
++ }
++ return err;
++}
++#endif /* CONFIG_IGB_VMDQ_NETDEV */
++
+ /**
+ * igb_set_fw_version - Configure version string for ethtool
+ * @adapter: adapter struct
++ *
+ **/
+-void igb_set_fw_version(struct igb_adapter *adapter)
++static void igb_set_fw_version(struct igb_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_fw_version fw;
+
+- igb_get_fw_version(hw, &fw);
++ e1000_get_fw_version(hw, &fw);
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+- if (!(igb_get_flash_presence_i210(hw))) {
++ if (!(e1000_get_flash_presence_i210(hw))) {
+ snprintf(adapter->fw_version,
+- sizeof(adapter->fw_version),
+- "%2d.%2d-%d",
+- fw.invm_major, fw.invm_minor,
+- fw.invm_img_type);
++ sizeof(adapter->fw_version),
++ "%2d.%2d-%d",
++ fw.invm_major, fw.invm_minor, fw.invm_img_type);
+ break;
+ }
+ /* fall through */
+ default:
+- /* if option is rom valid, display its version too */
++ /* if option rom is valid, display its version too*/
+ if (fw.or_valid) {
+ snprintf(adapter->fw_version,
+- sizeof(adapter->fw_version),
+- "%d.%d, 0x%08x, %d.%d.%d",
+- fw.eep_major, fw.eep_minor, fw.etrack_id,
+- fw.or_major, fw.or_build, fw.or_patch);
++ sizeof(adapter->fw_version),
++ "%d.%d, 0x%08x, %d.%d.%d",
++ fw.eep_major, fw.eep_minor, fw.etrack_id,
++ fw.or_major, fw.or_build, fw.or_patch);
+ /* no option rom */
+- } else if (fw.etrack_id != 0X0000) {
++ } else {
++ if (fw.etrack_id != 0X0000) {
++ snprintf(adapter->fw_version,
++ sizeof(adapter->fw_version),
++ "%d.%d, 0x%08x",
++ fw.eep_major, fw.eep_minor, fw.etrack_id);
++ } else {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+- "%d.%d, 0x%08x",
+- fw.eep_major, fw.eep_minor, fw.etrack_id);
+- } else {
+- snprintf(adapter->fw_version,
+- sizeof(adapter->fw_version),
+- "%d.%d.%d",
+- fw.eep_major, fw.eep_minor, fw.eep_build);
++ "%d.%d.%d",
++ fw.eep_major, fw.eep_minor, fw.eep_build);
++ }
+ }
+ break;
+ }
+@@ -2159,126 +2548,130 @@
+ struct e1000_hw *hw = &adapter->hw;
+ u16 eeprom_data;
+
+- hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
++ e1000_read_nvm(hw, NVM_COMPAT, 1, &eeprom_data);
+ switch (hw->bus.func) {
+ case E1000_FUNC_0:
+- if (eeprom_data & IGB_MAS_ENABLE_0) {
++ if (eeprom_data & IGB_MAS_ENABLE_0)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+- netdev_info(adapter->netdev,
+- "MAS: Enabling Media Autosense for port %d\n",
+- hw->bus.func);
+- }
+ break;
+ case E1000_FUNC_1:
+- if (eeprom_data & IGB_MAS_ENABLE_1) {
++ if (eeprom_data & IGB_MAS_ENABLE_1)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+- netdev_info(adapter->netdev,
+- "MAS: Enabling Media Autosense for port %d\n",
+- hw->bus.func);
+- }
+ break;
+ case E1000_FUNC_2:
+- if (eeprom_data & IGB_MAS_ENABLE_2) {
++ if (eeprom_data & IGB_MAS_ENABLE_2)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+- netdev_info(adapter->netdev,
+- "MAS: Enabling Media Autosense for port %d\n",
+- hw->bus.func);
+- }
+ break;
+ case E1000_FUNC_3:
+- if (eeprom_data & IGB_MAS_ENABLE_3) {
++ if (eeprom_data & IGB_MAS_ENABLE_3)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+- netdev_info(adapter->netdev,
+- "MAS: Enabling Media Autosense for port %d\n",
+- hw->bus.func);
+- }
+ break;
+ default:
+ /* Shouldn't get here */
+- netdev_err(adapter->netdev,
+- "MAS: Invalid port configuration, returning\n");
++ dev_err(pci_dev_to_dev(adapter->pdev),
++ "%s:AMS: Invalid port configuration, returning\n",
++ adapter->netdev->name);
+ break;
+ }
+ }
+
+-/**
+- * igb_init_i2c - Init I2C interface
+- * @adapter: pointer to adapter structure
+- **/
+-static s32 igb_init_i2c(struct igb_adapter *adapter)
++void igb_rar_set(struct igb_adapter *adapter, u32 index)
+ {
+- s32 status = 0;
++ u32 rar_low, rar_high;
++ struct e1000_hw *hw = &adapter->hw;
++ u8 *addr = adapter->mac_table[index].addr;
++ /* HW expects these in little endian so we reverse the byte order
++ * from network order (big endian) to little endian
++ */
++ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
++ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
++ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+- /* I2C interface supported on i350 devices */
+- if (adapter->hw.mac.type != e1000_i350)
+- return 0;
++ /* Indicate to hardware the Address is Valid. */
++ if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE)
++ rar_high |= E1000_RAH_AV;
+
+- /* Initialize the i2c bus which is controlled by the registers.
+- * This bus will use the i2c_algo_bit structue that implements
+- * the protocol through toggling of the 4 bits in the register.
+- */
+- adapter->i2c_adap.owner = THIS_MODULE;
+- adapter->i2c_algo = igb_i2c_algo;
+- adapter->i2c_algo.data = adapter;
+- adapter->i2c_adap.algo_data = &adapter->i2c_algo;
+- adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
+- strlcpy(adapter->i2c_adap.name, "igb BB",
+- sizeof(adapter->i2c_adap.name));
+- status = i2c_bit_add_bus(&adapter->i2c_adap);
+- return status;
++ if (hw->mac.type == e1000_82575)
++ rar_high |= E1000_RAH_POOL_1 * adapter->mac_table[index].queue;
++ else
++ rar_high |= E1000_RAH_POOL_1 << adapter->mac_table[index].queue;
++
++ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
++ E1000_WRITE_FLUSH(hw);
++ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
++ E1000_WRITE_FLUSH(hw);
+ }
+
+ /**
+- * igb_probe - Device Initialization Routine
+- * @pdev: PCI device information struct
+- * @ent: entry in igb_pci_tbl
++ * igb_probe - Device Initialization Routine
++ * @pdev: PCI device information struct
++ * @ent: entry in igb_pci_tbl
+ *
+- * Returns 0 on success, negative on failure
++ * Returns 0 on success, negative on failure
+ *
+- * igb_probe initializes an adapter identified by a pci_dev structure.
+- * The OS initialization, configuring of the adapter private structure,
+- * and a hardware reset occur.
++ * igb_probe initializes an adapter identified by a pci_dev structure.
++ * The OS initialization, configuring of the adapter private structure,
++ * and a hardware reset occur.
+ **/
+-static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++static int igb_probe(struct pci_dev *pdev,
++ const struct pci_device_id *ent)
+ {
+ struct net_device *netdev;
+ struct igb_adapter *adapter;
+ struct e1000_hw *hw;
+ u16 eeprom_data = 0;
++ u8 pba_str[E1000_PBANUM_LENGTH];
+ s32 ret_val;
+ static int global_quad_port_a; /* global quad port a indication */
+- const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
+ int err, pci_using_dac;
+- u8 part_str[E1000_PBANUM_LENGTH];
+-
+- /* Catch broken hardware that put the wrong VF device ID in
+- * the PCIe SR-IOV capability.
+- */
+- if (pdev->is_virtfn) {
+- WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
+- pci_name(pdev), pdev->vendor, pdev->device);
+- return -EINVAL;
+- }
++ static int cards_found;
++#ifdef HAVE_NDO_SET_FEATURES
++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
++ u32 hw_features;
++#else
++ netdev_features_t hw_features;
++#endif
++#endif
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ pci_using_dac = 0;
+- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
++ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64));
+ if (!err) {
+- pci_using_dac = 1;
++ err = dma_set_coherent_mask(pci_dev_to_dev(pdev),
++ DMA_BIT_MASK(64));
++ if (!err)
++ pci_using_dac = 1;
+ } else {
+- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
+ if (err) {
+- dev_err(&pdev->dev,
+- "No usable DMA configuration, aborting\n");
+- goto err_dma;
++ err = dma_set_coherent_mask(pci_dev_to_dev(pdev),
++ DMA_BIT_MASK(32));
++ if (err) {
++ IGB_ERR(
++ "No usable DMA configuration, aborting\n");
++ goto err_dma;
++ }
+ }
+ }
+
+- err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+- IORESOURCE_MEM),
++#ifndef HAVE_ASPM_QUIRKS
++ /* 82575 requires that the pci-e link partner disable the L0s state */
++ switch (pdev->device) {
++ case E1000_DEV_ID_82575EB_COPPER:
++ case E1000_DEV_ID_82575EB_FIBER_SERDES:
++ case E1000_DEV_ID_82575GB_QUAD_COPPER:
++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
++ default:
++ break;
++ }
++
++#endif /* HAVE_ASPM_QUIRKS */
++ err = pci_request_selected_regions(pdev,
++ pci_select_bars(pdev,
++ IORESOURCE_MEM),
+ igb_driver_name);
+ if (err)
+ goto err_pci_reg;
+@@ -2286,14 +2679,18 @@
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+- pci_save_state(pdev);
+
+ err = -ENOMEM;
++#ifdef HAVE_TX_MQ
+ netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
+ IGB_MAX_TX_QUEUES);
++#else
++ netdev = alloc_etherdev(sizeof(struct igb_adapter));
++#endif /* HAVE_TX_MQ */
+ if (!netdev)
+ goto err_alloc_etherdev;
+
++ SET_MODULE_OWNER(netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+@@ -2302,158 +2699,225 @@
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+- adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
++ adapter->port_num = hw->bus.func;
++ adapter->msg_enable = (1 << debug) - 1;
+
++#ifdef HAVE_PCI_ERS
++ err = pci_save_state(pdev);
++ if (err)
++ goto err_ioremap;
++#endif
+ err = -EIO;
+- hw->hw_addr = pci_iomap(pdev, 0, 0);
+- if (!hw->hw_addr)
++ adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
++ pci_resource_len(pdev, 0));
++ if (!adapter->io_addr)
+ goto err_ioremap;
++ /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
++ hw->hw_addr = adapter->io_addr;
+
++#ifdef HAVE_NET_DEVICE_OPS
+ netdev->netdev_ops = &igb_netdev_ops;
++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
++ set_netdev_ops_ext(netdev, &igb_netdev_ops_ext);
++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
++#else /* HAVE_NET_DEVICE_OPS */
++ netdev->open = &igb_open;
++ netdev->stop = &igb_close;
++ netdev->get_stats = &igb_get_stats;
++#ifdef HAVE_SET_RX_MODE
++ netdev->set_rx_mode = &igb_set_rx_mode;
++#endif
++ netdev->set_multicast_list = &igb_set_rx_mode;
++ netdev->set_mac_address = &igb_set_mac;
++ netdev->change_mtu = &igb_change_mtu;
++ netdev->do_ioctl = &igb_ioctl;
++#ifdef HAVE_TX_TIMEOUT
++ netdev->tx_timeout = &igb_tx_timeout;
++#endif
++ netdev->vlan_rx_register = igb_vlan_mode;
++ netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
++ netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
++#ifdef CONFIG_NET_POLL_CONTROLLER
++ netdev->poll_controller = igb_netpoll;
++#endif
++ netdev->hard_start_xmit = &igb_xmit_frame;
++#endif /* HAVE_NET_DEVICE_OPS */
+ igb_set_ethtool_ops(netdev);
++#ifdef HAVE_TX_TIMEOUT
+ netdev->watchdog_timeo = 5 * HZ;
++#endif
+
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+- netdev->mem_start = pci_resource_start(pdev, 0);
+- netdev->mem_end = pci_resource_end(pdev, 0);
+-
+- /* PCI config space info */
+- hw->vendor_id = pdev->vendor;
+- hw->device_id = pdev->device;
+- hw->revision_id = pdev->revision;
+- hw->subsystem_vendor_id = pdev->subsystem_vendor;
+- hw->subsystem_device_id = pdev->subsystem_device;
+-
+- /* Copy the default MAC, PHY and NVM function pointers */
+- memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+- memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+- memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
+- /* Initialize skew-specific constants */
+- err = ei->get_invariants(hw);
+- if (err)
+- goto err_sw_init;
++ adapter->bd_number = cards_found;
+
+ /* setup the private structure */
+ err = igb_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+- igb_get_bus_info_pcie(hw);
++ igb_e1000_get_bus_info(hw);
+
+- hw->phy.autoneg_wait_to_complete = false;
++ hw->phy.autoneg_wait_to_complete = FALSE;
++ hw->mac.adaptive_ifs = FALSE;
+
+ /* Copper options */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ hw->phy.mdix = AUTO_ALL_MODES;
+- hw->phy.disable_polarity_correction = false;
++ hw->phy.disable_polarity_correction = FALSE;
+ hw->phy.ms_type = e1000_ms_hw_default;
+ }
+
+- if (igb_check_reset_block(hw))
+- dev_info(&pdev->dev,
++ if (e1000_check_reset_block(hw))
++ dev_info(pci_dev_to_dev(pdev),
+ "PHY reset is blocked due to SOL/IDER session.\n");
+
+- /* features is initialized to 0 in allocation, it might have bits
++ /*
++ * features is initialized to 0 in allocation, it might have bits
+ * set by igb_sw_init so we should use an or instead of an
+ * assignment.
+ */
+ netdev->features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
++#ifdef NETIF_F_IPV6_CSUM
+ NETIF_F_IPV6_CSUM |
++#endif
++#ifdef NETIF_F_TSO
+ NETIF_F_TSO |
++#ifdef NETIF_F_TSO6
+ NETIF_F_TSO6 |
++#endif
++#endif /* NETIF_F_TSO */
++#ifdef NETIF_F_RXHASH
+ NETIF_F_RXHASH |
++#endif
+ NETIF_F_RXCSUM |
++#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
++#else
++ NETIF_F_HW_VLAN_RX |
++ NETIF_F_HW_VLAN_TX;
++#endif
++
++ if (hw->mac.type >= e1000_82576)
++ netdev->features |= NETIF_F_SCTP_CSUM;
+
++#ifdef HAVE_NDO_SET_FEATURES
+ /* copy netdev features into list of user selectable features */
+- netdev->hw_features |= netdev->features;
+- netdev->hw_features |= NETIF_F_RXALL;
++#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT
++ hw_features = netdev->hw_features;
++
++ /* give us the option of enabling LRO later */
++ hw_features |= NETIF_F_LRO;
++
++#else
++ hw_features = get_netdev_hw_features(netdev);
++
++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
++ hw_features |= netdev->features;
++
++#else
++#ifdef NETIF_F_GRO
++
++ /* this is only needed on kernels prior to 2.6.39 */
++ netdev->features |= NETIF_F_GRO;
++#endif /* NETIF_F_GRO */
++#endif /* HAVE_NDO_SET_FEATURES */
+
+ /* set this bit last since it cannot be part of hw_features */
++#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
++#endif /* NETIF_F_HW_FLAN_CTAG_FILTER */
++#ifdef NETIF_F_HW_VLAN_TX
++ netdev->features |= NETIF_F_HW_VLAN_FILTER;
++#endif /* NETIF_F_HW_VLAN_TX */
++
++#ifdef HAVE_NDO_SET_FEATURES
++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
++ set_netdev_hw_features(netdev, hw_features);
++#else
++ netdev->hw_features = hw_features;
++#endif
++#endif
+
++#ifdef HAVE_NETDEV_VLAN_FEATURES
+ netdev->vlan_features |= NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG;
+
+- netdev->priv_flags |= IFF_SUPP_NOFCS;
+-
+- if (pci_using_dac) {
++#endif /* HAVE_NETDEV_VLAN_FEATURES */
++ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+- netdev->vlan_features |= NETIF_F_HIGHDMA;
+- }
+
+- if (hw->mac.type >= e1000_82576) {
+- netdev->hw_features |= NETIF_F_SCTP_CSUM;
+- netdev->features |= NETIF_F_SCTP_CSUM;
+- }
+-
+- netdev->priv_flags |= IFF_UNICAST_FLT;
+-
+- adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
++ adapter->en_mng_pt = igb_e1000_enable_mng_pass_thru(hw);
++#ifdef DEBUG
++ if (adapter->dmac != IGB_DMAC_DISABLE)
++ netdev_info(netdev, "%s: DMA Coalescing is enabled..\n",
++ netdev->name);
++#endif
+
+ /* before reading the NVM, reset the controller to put the device in a
+ * known good starting state
+ */
+- hw->mac.ops.reset_hw(hw);
++ igb_e1000_reset_hw(hw);
+
+- /* make sure the NVM is good , i211/i210 parts can have special NVM
+- * that doesn't contain a checksum
+- */
+- switch (hw->mac.type) {
+- case e1000_i210:
+- case e1000_i211:
+- if (igb_get_flash_presence_i210(hw)) {
+- if (hw->nvm.ops.validate(hw) < 0) {
+- dev_err(&pdev->dev,
+- "The NVM Checksum Is Not Valid\n");
+- err = -EIO;
+- goto err_eeprom;
+- }
+- }
+- break;
+- default:
+- if (hw->nvm.ops.validate(hw) < 0) {
+- dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
+- err = -EIO;
+- goto err_eeprom;
+- }
+- break;
++ /* make sure the NVM is good */
++ if (e1000_validate_nvm_checksum(hw) < 0) {
++ dev_err(pci_dev_to_dev(pdev),
++ "The NVM Checksum Is Not Valid\n");
++ err = -EIO;
++ goto err_eeprom;
+ }
+
+ /* copy the MAC address out of the NVM */
+- if (hw->mac.ops.read_mac_addr(hw))
+- dev_err(&pdev->dev, "NVM Read Error\n");
+-
++ if (igb_e1000_read_mac_addr(hw))
++ dev_err(pci_dev_to_dev(pdev), "NVM Read Error\n");
+ memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
++#ifdef ETHTOOL_GPERMADDR
++ memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
+
++ if (!is_valid_ether_addr(netdev->perm_addr)) {
++#else
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+- dev_err(&pdev->dev, "Invalid MAC Address\n");
++#endif
++ dev_err(pci_dev_to_dev(pdev), "Invalid MAC Address\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
++ memcpy(&adapter->mac_table[0].addr, hw->mac.addr, netdev->addr_len);
++ adapter->mac_table[0].queue = adapter->vfs_allocated_count;
++ adapter->mac_table[0].state = (IGB_MAC_STATE_DEFAULT
++ | IGB_MAC_STATE_IN_USE);
++ igb_rar_set(adapter, 0);
++
+ /* get firmware version for ethtool -i */
+ igb_set_fw_version(adapter);
+
+ /* configure RXPBSIZE and TXPBSIZE */
+ if (hw->mac.type == e1000_i210) {
+- wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
+- wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
++ E1000_WRITE_REG(hw, E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
++ E1000_WRITE_REG(hw, E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
+ }
+
+- setup_timer(&adapter->watchdog_timer, igb_watchdog,
++ /* Check if Media Autosense is enabled */
++ if (hw->mac.type == e1000_82580)
++ igb_init_mas(adapter);
++ setup_timer(&adapter->watchdog_timer, &igb_watchdog,
+ (unsigned long) adapter);
+- setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
++ setup_timer(&adapter->dma_err_timer, &igb_dma_err_timer,
++ (unsigned long) adapter);
++ setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
+ (unsigned long) adapter);
+
+ INIT_WORK(&adapter->reset_task, igb_reset_task);
+ INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
++ INIT_WORK(&adapter->dma_err_task, igb_dma_err_task);
+
+ /* Initialize link properties that are user-changeable */
+ adapter->fc_autoneg = true;
+@@ -2463,19 +2927,19 @@
+ hw->fc.requested_mode = e1000_fc_default;
+ hw->fc.current_mode = e1000_fc_default;
+
+- igb_validate_mdi_setting(hw);
++ igb_e1000_validate_mdi_setting(hw);
+
+ /* By default, support wake on port A */
+ if (hw->bus.func == 0)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+- /* Check the NVM for wake support on non-port A ports */
++ /* Check the NVM for wake support for non-port A ports */
+ if (hw->mac.type >= e1000_82580)
+ hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+ NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+ &eeprom_data);
+ else if (hw->bus.func == 1)
+- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
++ e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+
+ if (eeprom_data & IGB_EEPROM_APME)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+@@ -2494,7 +2958,7 @@
+ /* Wake events only supported on port A for dual fiber
+ * regardless of eeprom setting
+ */
+- if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
++ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1)
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+ break;
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+@@ -2509,9 +2973,7 @@
+ global_quad_port_a = 0;
+ break;
+ default:
+- /* If the device can't wake, don't set software support */
+- if (!device_can_wakeup(&adapter->pdev->dev))
+- adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
++ break;
+ }
+
+ /* initialize the wol settings based on the eeprom settings */
+@@ -2525,145 +2987,185 @@
+ adapter->wol = 0;
+ }
+
+- device_set_wakeup_enable(&adapter->pdev->dev,
++ /* Some vendors want the ability to Use the EEPROM setting as
++ * enable/disable only, and not for capability
++ */
++ if (((hw->mac.type == e1000_i350) ||
++ (hw->mac.type == e1000_i354)) &&
++ (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
++ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
++ adapter->wol = 0;
++ }
++ if (hw->mac.type == e1000_i350) {
++ if (((pdev->subsystem_device == 0x5001) ||
++ (pdev->subsystem_device == 0x5002)) &&
++ (hw->bus.func == 0)) {
++ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
++ adapter->wol = 0;
++ }
++ if (pdev->subsystem_device == 0x1F52)
++ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
++ }
++
++ device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev),
+ adapter->flags & IGB_FLAG_WOL_SUPPORTED);
+
+ /* reset the hardware with the new settings */
+ igb_reset(adapter);
++ adapter->devrc = 0;
+
++#ifdef HAVE_I2C_SUPPORT
+ /* Init the I2C interface */
+ err = igb_init_i2c(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to init i2c interface\n");
+ goto err_eeprom;
+ }
++#endif /* HAVE_I2C_SUPPORT */
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver.
+ */
+ igb_get_hw_control(adapter);
+
+- strcpy(netdev->name, "eth%d");
++ strncpy(netdev->name, "eth%d", IFNAMSIZ);
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++ err = igb_init_vmdq_netdevs(adapter);
++ if (err)
++ goto err_register;
++#endif
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+-#ifdef CONFIG_IGB_DCA
+- if (dca_add_requester(&pdev->dev) == 0) {
++#ifdef IGB_DCA
++ if (dca_add_requester(&pdev->dev) == E1000_SUCCESS) {
+ adapter->flags |= IGB_FLAG_DCA_ENABLED;
+- dev_info(&pdev->dev, "DCA enabled\n");
++ dev_info(pci_dev_to_dev(pdev), "DCA enabled\n");
+ igb_setup_dca(adapter);
+ }
+
+ #endif
+-#ifdef CONFIG_IGB_HWMON
++#ifdef HAVE_PTP_1588_CLOCK
++ /* do hw tstamp init after resetting */
++ igb_ptp_init(adapter);
++#endif /* HAVE_PTP_1588_CLOCK */
++
++ dev_info(pci_dev_to_dev(pdev), "Intel(R) Gigabit Ethernet Network Connection\n");
++ /* print bus type/speed/width info */
++ dev_info(pci_dev_to_dev(pdev), "%s: (PCIe:%s:%s) ",
++ netdev->name,
++ ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5GT/s" :
++ (hw->bus.speed == e1000_bus_speed_5000) ? "5.0GT/s" :
++ (hw->mac.type == e1000_i354) ? "integrated" : "unknown"),
++ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
++ (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
++ (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
++ (hw->mac.type == e1000_i354) ? "integrated" : "unknown"));
++ netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
++
++ ret_val = e1000_read_pba_string(hw, pba_str, E1000_PBANUM_LENGTH);
++ if (ret_val)
++ strcpy(pba_str, "Unknown");
++ dev_info(pci_dev_to_dev(pdev), "%s: PBA No: %s\n", netdev->name,
++ pba_str);
++
+ /* Initialize the thermal sensor on i350 devices. */
+- if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
+- u16 ets_word;
++ if (hw->mac.type == e1000_i350) {
++ if (hw->bus.func == 0) {
++ u16 ets_word;
+
+- /* Read the NVM to determine if this i350 device supports an
+- * external thermal sensor.
+- */
+- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
+- if (ets_word != 0x0000 && ets_word != 0xFFFF)
+- adapter->ets = true;
+- else
+- adapter->ets = false;
+- if (igb_sysfs_init(adapter))
+- dev_err(&pdev->dev,
+- "failed to allocate sysfs resources\n");
+- } else {
+- adapter->ets = false;
+- }
+-#endif
+- /* Check if Media Autosense is enabled */
+- adapter->ei = *ei;
+- if (hw->dev_spec._82575.mas_capable)
+- igb_init_mas(adapter);
++ /*
++ * Read the NVM to determine if this i350 device
++ * supports an external thermal sensor.
++ */
++ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word);
++ if (ets_word != 0x0000 && ets_word != 0xFFFF)
++ adapter->ets = true;
++ else
++ adapter->ets = false;
++ }
++#ifdef IGB_HWMON
+
+- /* do hw tstamp init after resetting */
+- igb_ptp_init(adapter);
++ igb_sysfs_init(adapter);
++#else
++#ifdef IGB_PROCFS
+
+- dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
+- /* print bus type/speed/width info, not applicable to i354 */
+- if (hw->mac.type != e1000_i354) {
+- dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
+- netdev->name,
+- ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+- (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
+- "unknown"),
+- ((hw->bus.width == e1000_bus_width_pcie_x4) ?
+- "Width x4" :
+- (hw->bus.width == e1000_bus_width_pcie_x2) ?
+- "Width x2" :
+- (hw->bus.width == e1000_bus_width_pcie_x1) ?
+- "Width x1" : "unknown"), netdev->dev_addr);
+- }
+-
+- if ((hw->mac.type >= e1000_i210 ||
+- igb_get_flash_presence_i210(hw))) {
+- ret_val = igb_read_part_string(hw, part_str,
+- E1000_PBANUM_LENGTH);
++ igb_procfs_init(adapter);
++#endif /* IGB_PROCFS */
++#endif /* IGB_HWMON */
+ } else {
+- ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
++ adapter->ets = false;
+ }
+
+- if (ret_val)
+- strcpy(part_str, "Unknown");
+- dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
+- dev_info(&pdev->dev,
+- "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
+- (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
+- (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
+- adapter->num_rx_queues, adapter->num_tx_queues);
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (hw->mac.type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ /* Enable EEE for internal copper PHY devices */
+- err = igb_set_eee_i350(hw);
++ err = e1000_set_eee_i350(hw, true, true);
+ if ((!err) &&
+- (!hw->dev_spec._82575.eee_disable)) {
++ (adapter->flags & IGB_FLAG_EEE))
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+- adapter->flags |= IGB_FLAG_EEE;
+- }
+ break;
+ case e1000_i354:
+- if ((rd32(E1000_CTRL_EXT) &
+- E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+- err = igb_set_eee_i354(hw);
++ if ((E1000_READ_REG(hw, E1000_CTRL_EXT)) &
++ (E1000_CTRL_EXT_LINK_MODE_SGMII)) {
++ err = e1000_set_eee_i354(hw, true, true);
+ if ((!err) &&
+- (!hw->dev_spec._82575.eee_disable)) {
++ (adapter->flags & IGB_FLAG_EEE))
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+- adapter->flags |= IGB_FLAG_EEE;
+- }
+ }
+ break;
+ default:
+ break;
+ }
+ }
++
++ /* send driver version info to firmware */
++ if ((hw->mac.type >= e1000_i350) &&
++ (e1000_get_flash_presence_i210(hw)))
++ igb_init_fw(adapter);
++
++#ifndef IGB_NO_LRO
++ if (netdev->features & NETIF_F_LRO)
++ dev_info(pci_dev_to_dev(pdev), "Internal LRO is enabled\n");
++ else
++ dev_info(pci_dev_to_dev(pdev), "LRO is disabled\n");
++#endif
++ dev_info(pci_dev_to_dev(pdev),
++ "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
++ adapter->msix_entries ? "MSI-X" :
++ (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
++ adapter->num_rx_queues, adapter->num_tx_queues);
++
++ cards_found++;
++
+ pm_runtime_put_noidle(&pdev->dev);
+ return 0;
+
+ err_register:
+ igb_release_hw_control(adapter);
++#ifdef HAVE_I2C_SUPPORT
+ memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
++#endif /* HAVE_I2C_SUPPORT */
+ err_eeprom:
+- if (!igb_check_reset_block(hw))
+- igb_reset_phy(hw);
++ if (!e1000_check_reset_block(hw))
++ igb_e1000_phy_hw_reset(hw);
+
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+ err_sw_init:
++ kfree(adapter->shadow_vfta);
+ igb_clear_interrupt_scheme(adapter);
+- pci_iounmap(pdev, hw->hw_addr);
++ igb_reset_sriov_capability(adapter);
++ iounmap(adapter->io_addr);
+ err_ioremap:
+ free_netdev(netdev);
+ err_alloc_etherdev:
+@@ -2674,117 +3176,28 @@
+ pci_disable_device(pdev);
+ return err;
+ }
+-
+-#ifdef CONFIG_PCI_IOV
+-static int igb_disable_sriov(struct pci_dev *pdev)
+-{
+- struct net_device *netdev = pci_get_drvdata(pdev);
+- struct igb_adapter *adapter = netdev_priv(netdev);
+- struct e1000_hw *hw = &adapter->hw;
+-
+- /* reclaim resources allocated to VFs */
+- if (adapter->vf_data) {
+- /* disable iov and allow time for transactions to clear */
+- if (pci_vfs_assigned(pdev)) {
+- dev_warn(&pdev->dev,
+- "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
+- return -EPERM;
+- } else {
+- pci_disable_sriov(pdev);
+- msleep(500);
+- }
+-
+- kfree(adapter->vf_data);
+- adapter->vf_data = NULL;
+- adapter->vfs_allocated_count = 0;
+- wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+- wrfl();
+- msleep(100);
+- dev_info(&pdev->dev, "IOV Disabled\n");
+-
+- /* Re-enable DMA Coalescing flag since IOV is turned off */
+- adapter->flags |= IGB_FLAG_DMAC;
+- }
+-
+- return 0;
+-}
+-
+-static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
+-{
+- struct net_device *netdev = pci_get_drvdata(pdev);
+- struct igb_adapter *adapter = netdev_priv(netdev);
+- int old_vfs = pci_num_vf(pdev);
+- int err = 0;
+- int i;
+-
+- if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
+- err = -EPERM;
+- goto out;
+- }
+- if (!num_vfs)
+- goto out;
+-
+- if (old_vfs) {
+- dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
+- old_vfs, max_vfs);
+- adapter->vfs_allocated_count = old_vfs;
+- } else
+- adapter->vfs_allocated_count = num_vfs;
+-
+- adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+- sizeof(struct vf_data_storage), GFP_KERNEL);
+-
+- /* if allocation failed then we do not support SR-IOV */
+- if (!adapter->vf_data) {
+- adapter->vfs_allocated_count = 0;
+- dev_err(&pdev->dev,
+- "Unable to allocate memory for VF Data Storage\n");
+- err = -ENOMEM;
+- goto out;
+- }
+-
+- /* only call pci_enable_sriov() if no VFs are allocated already */
+- if (!old_vfs) {
+- err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+- if (err)
+- goto err_out;
+- }
+- dev_info(&pdev->dev, "%d VFs allocated\n",
+- adapter->vfs_allocated_count);
+- for (i = 0; i < adapter->vfs_allocated_count; i++)
+- igb_vf_configure(adapter, i);
+-
+- /* DMA Coalescing is not supported in IOV mode. */
+- adapter->flags &= ~IGB_FLAG_DMAC;
+- goto out;
+-
+-err_out:
+- kfree(adapter->vf_data);
+- adapter->vf_data = NULL;
+- adapter->vfs_allocated_count = 0;
+-out:
+- return err;
+-}
+-
+-#endif
+-/**
++#ifdef HAVE_I2C_SUPPORT
++/*
+ * igb_remove_i2c - Cleanup I2C interface
+ * @adapter: pointer to adapter structure
+- **/
++ *
++ */
+ static void igb_remove_i2c(struct igb_adapter *adapter)
+ {
++
+ /* free the adapter bus structure */
+ i2c_del_adapter(&adapter->i2c_adap);
+ }
++#endif /* HAVE_I2C_SUPPORT */
+
+ /**
+- * igb_remove - Device Removal Routine
+- * @pdev: PCI device information struct
++ * igb_remove - Device Removal Routine
++ * @pdev: PCI device information struct
+ *
+- * igb_remove is called by the PCI subsystem to alert the driver
+- * that it should release a PCI device. The could be caused by a
+- * Hot-Plug event, or because the driver is going to be removed from
+- * memory.
++ * igb_remove is called by the PCI subsystem to alert the driver
++ * that it should release a PCI device. The could be caused by a
++ * Hot-Plug event, or because the driver is going to be removed from
++ * memory.
+ **/
+ static void igb_remove(struct pci_dev *pdev)
+ {
+@@ -2793,30 +3206,39 @@
+ struct e1000_hw *hw = &adapter->hw;
+
+ pm_runtime_get_noresume(&pdev->dev);
+-#ifdef CONFIG_IGB_HWMON
+- igb_sysfs_exit(adapter);
+-#endif
++#ifdef HAVE_I2C_SUPPORT
+ igb_remove_i2c(adapter);
++#endif /* HAVE_I2C_SUPPORT */
++#ifdef HAVE_PTP_1588_CLOCK
+ igb_ptp_stop(adapter);
+- /* The watchdog timer may be rescheduled, so explicitly
+- * disable watchdog from being rescheduled.
++#endif /* HAVE_PTP_1588_CLOCK */
++
++ /* flush_scheduled work may reschedule our watchdog task, so
++ * explicitly disable watchdog tasks from being rescheduled
+ */
+ set_bit(__IGB_DOWN, &adapter->state);
+ del_timer_sync(&adapter->watchdog_timer);
++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
++ del_timer_sync(&adapter->dma_err_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+- cancel_work_sync(&adapter->reset_task);
+- cancel_work_sync(&adapter->watchdog_task);
++ flush_scheduled_work();
+
+-#ifdef CONFIG_IGB_DCA
++#ifdef IGB_DCA
+ if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
+- dev_info(&pdev->dev, "DCA disabled\n");
++ dev_info(pci_dev_to_dev(pdev), "DCA disabled\n");
+ dca_remove_requester(&pdev->dev);
+ adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
+- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
++ E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE);
+ }
+ #endif
+
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++ igb_remove_vmdq_netdevs(adapter);
++#endif
++
++ igb_reset_sriov_capability(adapter);
++
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant.
+ */
+@@ -2826,16 +3248,21 @@
+
+ igb_clear_interrupt_scheme(adapter);
+
+-#ifdef CONFIG_PCI_IOV
+- igb_disable_sriov(pdev);
+-#endif
+-
+- pci_iounmap(pdev, hw->hw_addr);
++ if (adapter->io_addr)
++ iounmap(adapter->io_addr);
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
++#ifdef IGB_HWMON
++ igb_sysfs_exit(adapter);
++#else
++#ifdef IGB_PROCFS
++ igb_procfs_exit(adapter);
++#endif /* IGB_PROCFS */
++#endif /* IGB_HWMON */
++ kfree(adapter->mac_table);
+ kfree(adapter->shadow_vfta);
+ free_netdev(netdev);
+
+@@ -2845,110 +3272,12 @@
+ }
+
+ /**
+- * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
+- * @adapter: board private structure to initialize
+- *
+- * This function initializes the vf specific data storage and then attempts to
+- * allocate the VFs. The reason for ordering it this way is because it is much
+- * mor expensive time wise to disable SR-IOV than it is to allocate and free
+- * the memory for the VFs.
+- **/
+-static void igb_probe_vfs(struct igb_adapter *adapter)
+-{
+-#ifdef CONFIG_PCI_IOV
+- struct pci_dev *pdev = adapter->pdev;
+- struct e1000_hw *hw = &adapter->hw;
+-
+- /* Virtualization features not supported on i210 family. */
+- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+- return;
+-
+- pci_sriov_set_totalvfs(pdev, 7);
+- igb_pci_enable_sriov(pdev, max_vfs);
+-
+-#endif /* CONFIG_PCI_IOV */
+-}
+-
+-static void igb_init_queue_configuration(struct igb_adapter *adapter)
+-{
+- struct e1000_hw *hw = &adapter->hw;
+- u32 max_rss_queues;
+-
+- /* Determine the maximum number of RSS queues supported. */
+- switch (hw->mac.type) {
+- case e1000_i211:
+- max_rss_queues = IGB_MAX_RX_QUEUES_I211;
+- break;
+- case e1000_82575:
+- case e1000_i210:
+- max_rss_queues = IGB_MAX_RX_QUEUES_82575;
+- break;
+- case e1000_i350:
+- /* I350 cannot do RSS and SR-IOV at the same time */
+- if (!!adapter->vfs_allocated_count) {
+- max_rss_queues = 1;
+- break;
+- }
+- /* fall through */
+- case e1000_82576:
+- if (!!adapter->vfs_allocated_count) {
+- max_rss_queues = 2;
+- break;
+- }
+- /* fall through */
+- case e1000_82580:
+- case e1000_i354:
+- default:
+- max_rss_queues = IGB_MAX_RX_QUEUES;
+- break;
+- }
+-
+- adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+-
+- igb_set_flag_queue_pairs(adapter, max_rss_queues);
+-}
+-
+-void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
+- const u32 max_rss_queues)
+-{
+- struct e1000_hw *hw = &adapter->hw;
+-
+- /* Determine if we need to pair queues. */
+- switch (hw->mac.type) {
+- case e1000_82575:
+- case e1000_i211:
+- /* Device supports enough interrupts without queue pairing. */
+- break;
+- case e1000_82576:
+- /* If VFs are going to be allocated with RSS queues then we
+- * should pair the queues in order to conserve interrupts due
+- * to limited supply.
+- */
+- if ((adapter->rss_queues > 1) &&
+- (adapter->vfs_allocated_count > 6))
+- adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+- /* fall through */
+- case e1000_82580:
+- case e1000_i350:
+- case e1000_i354:
+- case e1000_i210:
+- default:
+- /* If rss_queues > half of max_rss_queues, pair the queues in
+- * order to conserve interrupts due to limited supply.
+- */
+- if (adapter->rss_queues > (max_rss_queues / 2))
+- adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+- break;
+- }
+-}
+-
+-/**
+- * igb_sw_init - Initialize general software structures (struct igb_adapter)
+- * @adapter: board private structure to initialize
++ * igb_sw_init - Initialize general software structures (struct igb_adapter)
++ * @adapter: board private structure to initialize
+ *
+- * igb_sw_init initializes the Adapter private data structure.
+- * Fields are initialized based on PCI device information and
+- * OS network device settings (MTU size).
++ * igb_sw_init initializes the Adapter private data structure.
++ * Fields are initialized based on PCI device information and
++ * OS network device settings (MTU size).
+ **/
+ static int igb_sw_init(struct igb_adapter *adapter)
+ {
+@@ -2956,84 +3285,78 @@
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
++ /* PCI config space info */
++
++ hw->vendor_id = pdev->vendor;
++ hw->device_id = pdev->device;
++ hw->subsystem_vendor_id = pdev->subsystem_vendor;
++ hw->subsystem_device_id = pdev->subsystem_device;
++
++ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
++
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IGB_DEFAULT_TXD;
+ adapter->rx_ring_count = IGB_DEFAULT_RXD;
+
+- /* set default ITR values */
+- adapter->rx_itr_setting = IGB_DEFAULT_ITR;
+- adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+-
+ /* set default work limits */
+ adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
+
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+- VLAN_HLEN;
+- adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
++ VLAN_HLEN;
+
+- spin_lock_init(&adapter->stats64_lock);
+-#ifdef CONFIG_PCI_IOV
+- switch (hw->mac.type) {
+- case e1000_82576:
+- case e1000_i350:
+- if (max_vfs > 7) {
+- dev_warn(&pdev->dev,
+- "Maximum of 7 VFs per PF, using max\n");
+- max_vfs = adapter->vfs_allocated_count = 7;
+- } else
+- adapter->vfs_allocated_count = max_vfs;
+- if (adapter->vfs_allocated_count)
+- dev_warn(&pdev->dev,
+- "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
+- break;
+- default:
+- break;
++ /* Initialize the hardware-specific values */
++ if (e1000_setup_init_funcs(hw, TRUE)) {
++ dev_err(pci_dev_to_dev(pdev), "Hardware Initialization Failure\n");
++ return -EIO;
+ }
+-#endif /* CONFIG_PCI_IOV */
+
+- igb_init_queue_configuration(adapter);
++ igb_check_options(adapter);
++
++ adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) *
++ hw->mac.rar_entry_count,
++ GFP_ATOMIC);
+
+ /* Setup and initialize a copy of the hw vlan table array */
+- adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
+- GFP_ATOMIC);
++ adapter->shadow_vfta = kzalloc(sizeof(u32) * E1000_VFTA_ENTRIES,
++ GFP_ATOMIC);
++
++ /* These calls may decrease the number of queues */
++ if (hw->mac.type < e1000_i210)
++ igb_set_sriov_capability(adapter);
+
+- /* This call may decrease the number of queues */
+ if (igb_init_interrupt_scheme(adapter, true)) {
+- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
++ dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+- igb_probe_vfs(adapter);
+-
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ igb_irq_disable(adapter);
+
+- if (hw->mac.type >= e1000_i350)
+- adapter->flags &= ~IGB_FLAG_DMAC;
+-
+ set_bit(__IGB_DOWN, &adapter->state);
+ return 0;
+ }
+
+ /**
+- * igb_open - Called when a network interface is made active
+- * @netdev: network interface device structure
++ * igb_open - Called when a network interface is made active
++ * @netdev: network interface device structure
+ *
+- * Returns 0 on success, negative value on failure
++ * Returns 0 on success, negative value on failure
+ *
+- * The open entry point is called when a network interface is made
+- * active by the system (IFF_UP). At this point all resources needed
+- * for transmit and receive operations are allocated, the interrupt
+- * handler is registered with the OS, the watchdog timer is started,
+- * and the stack is notified that the interface is ready.
++ * The open entry point is called when a network interface is made
++ * active by the system (IFF_UP). At this point all resources needed
++ * for transmit and receive operations are allocated, the interrupt
++ * handler is registered with the OS, the watchdog timer is started,
++ * and the stack is notified that the interface is ready.
+ **/
+ static int __igb_open(struct net_device *netdev, bool resuming)
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
++#ifdef CONFIG_PM_RUNTIME
+ struct pci_dev *pdev = adapter->pdev;
++#endif /* CONFIG_PM_RUNTIME */
+ int err;
+ int i;
+
+@@ -3043,8 +3366,10 @@
+ return -EBUSY;
+ }
+
++#ifdef CONFIG_PM_RUNTIME
+ if (!resuming)
+ pm_runtime_get_sync(&pdev->dev);
++#endif /* CONFIG_PM_RUNTIME */
+
+ netif_carrier_off(netdev);
+
+@@ -3072,12 +3397,12 @@
+ goto err_req_irq;
+
+ /* Notify the stack of the actual queue counts. */
+- err = netif_set_real_num_tx_queues(adapter->netdev,
+- adapter->num_tx_queues);
+- if (err)
+- goto err_set_queues;
++ netif_set_real_num_tx_queues(netdev,
++ adapter->vmdq_pools ? 1 :
++ adapter->num_tx_queues);
+
+- err = netif_set_real_num_rx_queues(adapter->netdev,
++ err = netif_set_real_num_rx_queues(netdev,
++ adapter->vmdq_pools ? 1 :
+ adapter->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+@@ -3087,30 +3412,31 @@
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ napi_enable(&(adapter->q_vector[i]->napi));
++ igb_configure_lli(adapter);
+
+ /* Clear any pending interrupts. */
+- rd32(E1000_ICR);
++ E1000_READ_REG(hw, E1000_ICR);
+
+ igb_irq_enable(adapter);
+
+ /* notify VFs that reset has been completed */
+ if (adapter->vfs_allocated_count) {
+- u32 reg_data = rd32(E1000_CTRL_EXT);
++ u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+ reg_data |= E1000_CTRL_EXT_PFRSTD;
+- wr32(E1000_CTRL_EXT, reg_data);
++ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data);
+ }
+
+ netif_tx_start_all_queues(netdev);
+
+- if (!resuming)
+- pm_runtime_put(&pdev->dev);
++ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
++ schedule_work(&adapter->dma_err_task);
+
+ /* start the watchdog. */
+ hw->mac.get_link_status = 1;
+ schedule_work(&adapter->watchdog_task);
+
+- return 0;
++ return E1000_SUCCESS;
+
+ err_set_queues:
+ igb_free_irq(adapter);
+@@ -3122,59 +3448,72 @@
+ igb_free_all_tx_resources(adapter);
+ err_setup_tx:
+ igb_reset(adapter);
++
++#ifdef CONFIG_PM_RUNTIME
+ if (!resuming)
+ pm_runtime_put(&pdev->dev);
++#endif /* CONFIG_PM_RUNTIME */
+
+ return err;
+ }
+
+-static int igb_open(struct net_device *netdev)
++int igb_open(struct net_device *netdev)
+ {
+ return __igb_open(netdev, false);
+ }
+
+ /**
+- * igb_close - Disables a network interface
+- * @netdev: network interface device structure
++ * igb_close - Disables a network interface
++ * @netdev: network interface device structure
+ *
+- * Returns 0, this is not allowed to fail
++ * Returns 0, this is not allowed to fail
+ *
+- * The close entry point is called when an interface is de-activated
+- * by the OS. The hardware is still under the driver's control, but
+- * needs to be disabled. A global MAC reset is issued to stop the
+- * hardware, and all transmit and receive resources are freed.
++ * The close entry point is called when an interface is de-activated
++ * by the OS. The hardware is still under the driver's control, but
++ * needs to be disabled. A global MAC reset is issued to stop the
++ * hardware, and all transmit and receive resources are freed.
+ **/
+ static int __igb_close(struct net_device *netdev, bool suspending)
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
++#ifdef CONFIG_PM_RUNTIME
+ struct pci_dev *pdev = adapter->pdev;
++#endif /* CONFIG_PM_RUNTIME */
+
+ WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
+
++#ifdef CONFIG_PM_RUNTIME
+ if (!suspending)
+ pm_runtime_get_sync(&pdev->dev);
++#endif /* CONFIG_PM_RUNTIME */
+
+ igb_down(adapter);
++
++ igb_release_hw_control(adapter);
++
+ igb_free_irq(adapter);
+
+ igb_free_all_tx_resources(adapter);
+ igb_free_all_rx_resources(adapter);
+
++#ifdef CONFIG_PM_RUNTIME
+ if (!suspending)
+ pm_runtime_put_sync(&pdev->dev);
++#endif /* CONFIG_PM_RUNTIME */
++
+ return 0;
+ }
+
+-static int igb_close(struct net_device *netdev)
++int igb_close(struct net_device *netdev)
+ {
+ return __igb_close(netdev, false);
+ }
+
+ /**
+- * igb_setup_tx_resources - allocate Tx resources (Descriptors)
+- * @tx_ring: tx descriptor ring (for a specific queue) to setup
++ * igb_setup_tx_resources - allocate Tx resources (Descriptors)
++ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+- * Return 0 on success, negative on failure
++ * Return 0 on success, negative on failure
+ **/
+ int igb_setup_tx_resources(struct igb_ring *tx_ring)
+ {
+@@ -3182,7 +3521,6 @@
+ int size;
+
+ size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+-
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+@@ -3193,6 +3531,7 @@
+
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
++
+ if (!tx_ring->desc)
+ goto err;
+
+@@ -3203,17 +3542,17 @@
+
+ err:
+ vfree(tx_ring->tx_buffer_info);
+- tx_ring->tx_buffer_info = NULL;
+- dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
++ dev_err(dev,
++ "Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+ }
+
+ /**
+- * igb_setup_all_tx_resources - wrapper to allocate Tx resources
+- * (Descriptors) for all queues
+- * @adapter: board private structure
++ * igb_setup_all_tx_resources - wrapper to allocate Tx resources
++ * (Descriptors) for all queues
++ * @adapter: board private structure
+ *
+- * Return 0 on success, negative on failure
++ * Return 0 on success, negative on failure
+ **/
+ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
+ {
+@@ -3223,7 +3562,7 @@
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = igb_setup_tx_resources(adapter->tx_ring[i]);
+ if (err) {
+- dev_err(&pdev->dev,
++ dev_err(pci_dev_to_dev(pdev),
+ "Allocation for Tx Queue %u failed\n", i);
+ for (i--; i >= 0; i--)
+ igb_free_tx_resources(adapter->tx_ring[i]);
+@@ -3235,8 +3574,8 @@
+ }
+
+ /**
+- * igb_setup_tctl - configure the transmit control registers
+- * @adapter: Board private structure
++ * igb_setup_tctl - configure the transmit control registers
++ * @adapter: Board private structure
+ **/
+ void igb_setup_tctl(struct igb_adapter *adapter)
+ {
+@@ -3244,28 +3583,45 @@
+ u32 tctl;
+
+ /* disable queue 0 which is enabled by default on 82575 and 82576 */
+- wr32(E1000_TXDCTL(0), 0);
++ E1000_WRITE_REG(hw, E1000_TXDCTL(0), 0);
+
+ /* Program the Transmit Control Register */
+- tctl = rd32(E1000_TCTL);
++ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+- igb_config_collision_dist(hw);
++ igb_e1000_config_collision_dist(hw);
+
+ /* Enable transmits */
+ tctl |= E1000_TCTL_EN;
+
+- wr32(E1000_TCTL, tctl);
++ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
++}
++
++static u32 igb_tx_wthresh(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++
++ switch (hw->mac.type) {
++ case e1000_i354:
++ return 4;
++ case e1000_82576:
++ if (adapter->msix_entries)
++ return 1;
++ default:
++ break;
++ }
++
++ return 16;
+ }
+
+ /**
+- * igb_configure_tx_ring - Configure transmit ring after Reset
+- * @adapter: board private structure
+- * @ring: tx ring to configure
++ * igb_configure_tx_ring - Configure transmit ring after Reset
++ * @adapter: board private structure
++ * @ring: tx ring to configure
+ *
+- * Configure a transmit ring after a reset.
++ * Configure a transmit ring after a reset.
+ **/
+ void igb_configure_tx_ring(struct igb_adapter *adapter,
+ struct igb_ring *ring)
+@@ -3276,33 +3632,33 @@
+ int reg_idx = ring->reg_idx;
+
+ /* disable the queue */
+- wr32(E1000_TXDCTL(reg_idx), 0);
+- wrfl();
++ E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), 0);
++ E1000_WRITE_FLUSH(hw);
+ mdelay(10);
+
+- wr32(E1000_TDLEN(reg_idx),
+- ring->count * sizeof(union e1000_adv_tx_desc));
+- wr32(E1000_TDBAL(reg_idx),
+- tdba & 0x00000000ffffffffULL);
+- wr32(E1000_TDBAH(reg_idx), tdba >> 32);
++ E1000_WRITE_REG(hw, E1000_TDLEN(reg_idx),
++ ring->count * sizeof(union e1000_adv_tx_desc));
++ E1000_WRITE_REG(hw, E1000_TDBAL(reg_idx),
++ tdba & 0x00000000ffffffffULL);
++ E1000_WRITE_REG(hw, E1000_TDBAH(reg_idx), tdba >> 32);
+
+- ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+- wr32(E1000_TDH(reg_idx), 0);
++ ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
++ E1000_WRITE_REG(hw, E1000_TDH(reg_idx), 0);
+ writel(0, ring->tail);
+
+ txdctl |= IGB_TX_PTHRESH;
+ txdctl |= IGB_TX_HTHRESH << 8;
+- txdctl |= IGB_TX_WTHRESH << 16;
++ txdctl |= igb_tx_wthresh(adapter) << 16;
+
+ txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+- wr32(E1000_TXDCTL(reg_idx), txdctl);
++ E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), txdctl);
+ }
+
+ /**
+- * igb_configure_tx - Configure transmit Unit after Reset
+- * @adapter: board private structure
++ * igb_configure_tx - Configure transmit Unit after Reset
++ * @adapter: board private structure
+ *
+- * Configure the Tx unit of the MAC after a reset.
++ * Configure the Tx unit of the MAC after a reset.
+ **/
+ static void igb_configure_tx(struct igb_adapter *adapter)
+ {
+@@ -3313,28 +3669,30 @@
+ }
+
+ /**
+- * igb_setup_rx_resources - allocate Rx resources (Descriptors)
+- * @rx_ring: Rx descriptor ring (for a specific queue) to setup
++ * igb_setup_rx_resources - allocate Rx resources (Descriptors)
++ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+- * Returns 0 on success, negative on failure
++ * Returns 0 on success, negative on failure
+ **/
+ int igb_setup_rx_resources(struct igb_ring *rx_ring)
+ {
+ struct device *dev = rx_ring->dev;
+- int size;
++ int size, desc_len;
+
+ size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+-
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
+ goto err;
+
++ desc_len = sizeof(union e1000_adv_rx_desc);
++
+ /* Round up to nearest 4K */
+- rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
++ rx_ring->size = rx_ring->count * desc_len;
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
++
+ if (!rx_ring->desc)
+ goto err;
+
+@@ -3347,16 +3705,17 @@
+ err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+- dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
++ dev_err(dev,
++ "Unable to allocate memory for the receive descriptor ring\n");
+ return -ENOMEM;
+ }
+
+ /**
+- * igb_setup_all_rx_resources - wrapper to allocate Rx resources
+- * (Descriptors) for all queues
+- * @adapter: board private structure
++ * igb_setup_all_rx_resources - wrapper to allocate Rx resources
++ * (Descriptors) for all queues
++ * @adapter: board private structure
+ *
+- * Return 0 on success, negative on failure
++ * Return 0 on success, negative on failure
+ **/
+ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
+ {
+@@ -3366,7 +3725,7 @@
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = igb_setup_rx_resources(adapter->rx_ring[i]);
+ if (err) {
+- dev_err(&pdev->dev,
++ dev_err(pci_dev_to_dev(pdev),
+ "Allocation for Rx Queue %u failed\n", i);
+ for (i--; i >= 0; i--)
+ igb_free_rx_resources(adapter->rx_ring[i]);
+@@ -3378,14 +3737,17 @@
+ }
+
+ /**
+- * igb_setup_mrqc - configure the multiple receive queue control registers
+- * @adapter: Board private structure
++ * igb_setup_mrqc - configure the multiple receive queue control registers
++ * @adapter: Board private structure
+ **/
+ static void igb_setup_mrqc(struct igb_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc, rxcsum;
+ u32 j, num_rx_queues;
++#ifndef ETHTOOL_SRXFHINDIR
++ u32 shift = 0, shift2 = 0;
++#endif /* ETHTOOL_SRXFHINDIR */
+ static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
+ 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
+ 0xA32DCB77, 0x0CF23080, 0x3BB7426A,
+@@ -3393,33 +3755,72 @@
+
+ /* Fill out hash function seeds */
+ for (j = 0; j < 10; j++)
+- wr32(E1000_RSSRK(j), rsskey[j]);
++ E1000_WRITE_REG(hw, E1000_RSSRK(j), rsskey[j]);
+
+ num_rx_queues = adapter->rss_queues;
+
+- switch (hw->mac.type) {
+- case e1000_82576:
++#ifdef ETHTOOL_SRXFHINDIR
++ if (hw->mac.type == e1000_82576) {
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count)
+ num_rx_queues = 2;
+- break;
+- default:
+- break;
+ }
+-
+ if (adapter->rss_indir_tbl_init != num_rx_queues) {
+ for (j = 0; j < IGB_RETA_SIZE; j++)
+ adapter->rss_indir_tbl[j] =
+- (j * num_rx_queues) / IGB_RETA_SIZE;
++ (j * num_rx_queues) / IGB_RETA_SIZE;
+ adapter->rss_indir_tbl_init = num_rx_queues;
+ }
+ igb_write_rss_indir_tbl(adapter);
++#else
++ /* 82575 and 82576 supports 2 RSS queues for VMDq */
++ switch (hw->mac.type) {
++ case e1000_82575:
++ if (adapter->vmdq_pools) {
++ shift = 2;
++ shift2 = 6;
++ }
++ shift = 6;
++ break;
++ case e1000_82576:
++ /* 82576 supports 2 RSS queues for SR-IOV */
++ if (adapter->vfs_allocated_count || adapter->vmdq_pools) {
++ shift = 3;
++ num_rx_queues = 2;
++ }
++ break;
++ default:
++ break;
++ }
++
++ /*
++ * Populate the redirection table 4 entries at a time. To do this
++ * we are generating the results for n and n+2 and then interleaving
++ * those with the results with n+1 and n+3.
++ */
++ for (j = 0; j < 32; j++) {
++ /* first pass generates n and n+2 */
++ u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
++ u32 reta = (base & 0x07800780) >> (7 - shift);
++
++ /* second pass generates n+1 and n+3 */
++ base += 0x00010001 * num_rx_queues;
++ reta |= (base & 0x07800780) << (1 + shift);
++
++ /* generate 2nd table for 82575 based parts */
++ if (shift2)
++ reta |= (0x01010101 * num_rx_queues) << shift2;
++
++ E1000_WRITE_REG(hw, E1000_RETA(j), reta);
++ }
++#endif /* ETHTOOL_SRXFHINDIR */
+
+- /* Disable raw packet checksumming so that RSS hash is placed in
++ /*
++ * Disable raw packet checksumming so that RSS hash is placed in
+ * descriptor on writeback. No need to enable TCP/UDP/IP checksum
+ * offloads as they are enabled by default
+ */
+- rxcsum = rd32(E1000_RXCSUM);
++ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+ rxcsum |= E1000_RXCSUM_PCSD;
+
+ if (adapter->hw.mac.type >= e1000_82576)
+@@ -3427,7 +3828,7 @@
+ rxcsum |= E1000_RXCSUM_CRCOFL;
+
+ /* Don't need to set TUOFL or IPOFL, they default to 1 */
+- wr32(E1000_RXCSUM, rxcsum);
++ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+
+ /* Generate RSS hash based on packet types, TCP/UDP
+ * port numbers and/or IPv4/v6 src and dst addresses
+@@ -3447,40 +3848,39 @@
+ * we default to RSS so that an RSS hash is calculated per packet even
+ * if we are only using one queue
+ */
+- if (adapter->vfs_allocated_count) {
++ if (adapter->vfs_allocated_count || adapter->vmdq_pools) {
+ if (hw->mac.type > e1000_82575) {
+ /* Set the default pool for the PF's first queue */
+- u32 vtctl = rd32(E1000_VT_CTL);
++ u32 vtctl = E1000_READ_REG(hw, E1000_VT_CTL);
+
+ vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
+ E1000_VT_CTL_DISABLE_DEF_POOL);
+ vtctl |= adapter->vfs_allocated_count <<
+ E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+- wr32(E1000_VT_CTL, vtctl);
++ E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl);
+ }
+ if (adapter->rss_queues > 1)
+ mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+ else
+ mrqc |= E1000_MRQC_ENABLE_VMDQ;
+ } else {
+- if (hw->mac.type != e1000_i211)
+- mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
++ mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
+ }
+ igb_vmm_control(adapter);
+
+- wr32(E1000_MRQC, mrqc);
++ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+ }
+
+ /**
+- * igb_setup_rctl - configure the receive control registers
+- * @adapter: Board private structure
++ * igb_setup_rctl - configure the receive control registers
++ * @adapter: Board private structure
+ **/
+ void igb_setup_rctl(struct igb_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+- rctl = rd32(E1000_RCTL);
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+@@ -3488,7 +3888,8 @@
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
+ (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+- /* enable stripping of CRC. It's unlikely this will break BMC
++ /*
++ * enable stripping of CRC. It's unlikely this will break BMC
+ * redirection as it did with e1000. Newer features require
+ * that the HW strips the CRC.
+ */
+@@ -3501,7 +3902,7 @@
+ rctl |= E1000_RCTL_LPE;
+
+ /* disable queue 0 to prevent tail write w/o re-config */
+- wr32(E1000_RXDCTL(0), 0);
++ E1000_WRITE_REG(hw, E1000_RXDCTL(0), 0);
+
+ /* Attention!!! For SR-IOV PF driver operations you must enable
+ * queue drop for all VF and PF queues to prevent head of line blocking
+@@ -3509,27 +3910,10 @@
+ */
+ if (adapter->vfs_allocated_count) {
+ /* set all queue drop enable bits */
+- wr32(E1000_QDE, ALL_QUEUES);
+- }
+-
+- /* This is useful for sniffing bad packets. */
+- if (adapter->netdev->features & NETIF_F_RXALL) {
+- /* UPE and MPE will be handled by normal PROMISC logic
+- * in e1000e_set_rx_mode
+- */
+- rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
+- E1000_RCTL_BAM | /* RX All Bcast Pkts */
+- E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+-
+- rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
+- E1000_RCTL_DPF | /* Allow filtered pause */
+- E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
+- /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
+- * and that breaks VLANs.
+- */
++ E1000_WRITE_REG(hw, E1000_QDE, ALL_QUEUES);
+ }
+
+- wr32(E1000_RCTL, rctl);
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ }
+
+ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
+@@ -3543,21 +3927,31 @@
+ */
+ if (vfn < adapter->vfs_allocated_count &&
+ adapter->vf_data[vfn].vlans_enabled)
+- size += VLAN_TAG_SIZE;
++ size += VLAN_HLEN;
+
+- vmolr = rd32(E1000_VMOLR(vfn));
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++ if (vfn >= adapter->vfs_allocated_count) {
++ int queue = vfn - adapter->vfs_allocated_count;
++ struct igb_vmdq_adapter *vadapter;
++
++ vadapter = netdev_priv(adapter->vmdq_netdev[queue-1]);
++ if (vadapter->vlgrp)
++ size += VLAN_HLEN;
++ }
++#endif
++ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn));
+ vmolr &= ~E1000_VMOLR_RLPML_MASK;
+ vmolr |= size | E1000_VMOLR_LPE;
+- wr32(E1000_VMOLR(vfn), vmolr);
++ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
+
+ return 0;
+ }
+
+ /**
+- * igb_rlpml_set - set maximum receive packet size
+- * @adapter: board private structure
++ * igb_rlpml_set - set maximum receive packet size
++ * @adapter: board private structure
+ *
+- * Configure maximum receivable packet size.
++ * Configure maximum receivable packet size.
+ **/
+ static void igb_rlpml_set(struct igb_adapter *adapter)
+ {
+@@ -3565,9 +3959,13 @@
+ struct e1000_hw *hw = &adapter->hw;
+ u16 pf_id = adapter->vfs_allocated_count;
+
+- if (pf_id) {
+- igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
+- /* If we're in VMDQ or SR-IOV mode, then set global RLPML
++ if (adapter->vmdq_pools && hw->mac.type != e1000_82575) {
++ int i;
++
++ for (i = 0; i < adapter->vmdq_pools; i++)
++ igb_set_vf_rlpml(adapter, max_frame_size, pf_id + i);
++ /*
++ * If we're in VMDQ or SR-IOV mode, then set global RLPML
+ * to our max jumbo frame size, in case we need to enable
+ * jumbo frames on one of the rings later.
+ * This will not pass over-length frames into the default
+@@ -3575,56 +3973,73 @@
+ */
+ max_frame_size = MAX_JUMBO_FRAME_SIZE;
+ }
++ /* Set VF RLPML for the PF device. */
++ if (adapter->vfs_allocated_count)
++ igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
+
+- wr32(E1000_RLPML, max_frame_size);
++ E1000_WRITE_REG(hw, E1000_RLPML, max_frame_size);
+ }
+
++static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
++ int vfn, bool enable)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 val;
++ void __iomem *reg;
++
++ if (hw->mac.type < e1000_82576)
++ return;
++
++ if (hw->mac.type == e1000_i350)
++ reg = hw->hw_addr + E1000_DVMOLR(vfn);
++ else
++ reg = hw->hw_addr + E1000_VMOLR(vfn);
++
++ val = readl(reg);
++ if (enable)
++ val |= E1000_VMOLR_STRVLAN;
++ else
++ val &= ~(E1000_VMOLR_STRVLAN);
++ writel(val, reg);
++}
+ static inline void igb_set_vmolr(struct igb_adapter *adapter,
+ int vfn, bool aupe)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vmolr;
+
+- /* This register exists only on 82576 and newer so if we are older then
++ /*
++ * This register exists only on 82576 and newer so if we are older then
+ * we should exit and do nothing
+ */
+ if (hw->mac.type < e1000_82576)
+ return;
+
+- vmolr = rd32(E1000_VMOLR(vfn));
+- vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+- if (hw->mac.type == e1000_i350) {
+- u32 dvmolr;
++ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn));
+
+- dvmolr = rd32(E1000_DVMOLR(vfn));
+- dvmolr |= E1000_DVMOLR_STRVLAN;
+- wr32(E1000_DVMOLR(vfn), dvmolr);
+- }
+ if (aupe)
+- vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
++ vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
+ else
+ vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
+
+ /* clear all bits that might not be set */
+- vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
++ vmolr &= ~E1000_VMOLR_RSSE;
+
+ if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
+ vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
+- /* for VMDq only allow the VFs and pool 0 to accept broadcast and
+- * multicast packets
+- */
+- if (vfn <= adapter->vfs_allocated_count)
+- vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
+
+- wr32(E1000_VMOLR(vfn), vmolr);
++ vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
++ vmolr |= E1000_VMOLR_LPE; /* Accept long packets */
++
++ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
+ }
+
+ /**
+- * igb_configure_rx_ring - Configure a receive ring after Reset
+- * @adapter: board private structure
+- * @ring: receive ring to be configured
++ * igb_configure_rx_ring - Configure a receive ring after Reset
++ * @adapter: board private structure
++ * @ring: receive ring to be configured
+ *
+- * Configure the Rx unit of the MAC after a reset.
++ * Configure the Rx unit of the MAC after a reset.
+ **/
+ void igb_configure_rx_ring(struct igb_adapter *adapter,
+ struct igb_ring *ring)
+@@ -3634,32 +4049,67 @@
+ int reg_idx = ring->reg_idx;
+ u32 srrctl = 0, rxdctl = 0;
+
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ /*
++ * RLPML prevents us from receiving a frame larger than max_frame so
++ * it is safe to just set the rx_buffer_len to max_frame without the
++ * risk of an skb over panic.
++ */
++ ring->rx_buffer_len = max_t(u32, adapter->max_frame_size,
++ MAXIMUM_ETHERNET_VLAN_SIZE);
++
++#endif
+ /* disable the queue */
+- wr32(E1000_RXDCTL(reg_idx), 0);
++ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), 0);
+
+ /* Set DMA base address registers */
+- wr32(E1000_RDBAL(reg_idx),
+- rdba & 0x00000000ffffffffULL);
+- wr32(E1000_RDBAH(reg_idx), rdba >> 32);
+- wr32(E1000_RDLEN(reg_idx),
+- ring->count * sizeof(union e1000_adv_rx_desc));
++ E1000_WRITE_REG(hw, E1000_RDBAL(reg_idx),
++ rdba & 0x00000000ffffffffULL);
++ E1000_WRITE_REG(hw, E1000_RDBAH(reg_idx), rdba >> 32);
++ E1000_WRITE_REG(hw, E1000_RDLEN(reg_idx),
++ ring->count * sizeof(union e1000_adv_rx_desc));
+
+ /* initialize head and tail */
+- ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+- wr32(E1000_RDH(reg_idx), 0);
++ ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
++ E1000_WRITE_REG(hw, E1000_RDH(reg_idx), 0);
+ writel(0, ring->tail);
+
++ /* reset next-to- use/clean to place SW in sync with hardwdare */
++ ring->next_to_clean = 0;
++ ring->next_to_use = 0;
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ ring->next_to_alloc = 0;
++
++#endif
+ /* set descriptor configuration */
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
++#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
++ srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
++ E1000_SRRCTL_BSIZEPKT_SHIFT;
++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
++#ifdef HAVE_PTP_1588_CLOCK
+ if (hw->mac.type >= e1000_82580)
+ srrctl |= E1000_SRRCTL_TIMESTAMP;
+- /* Only set Drop Enable if we are supporting multiple queues */
+- if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
++#endif /* HAVE_PTP_1588_CLOCK */
++ /*
++ * We should set the drop enable bit if:
++ * SR-IOV is enabled
++ * or
++ * Flow Control is disabled and number of RX queues > 1
++ *
++ * This allows us to avoid head of line blocking for security
++ * and performance reasons.
++ */
++ if (adapter->vfs_allocated_count ||
++ (adapter->num_rx_queues > 1 &&
++ (hw->fc.requested_mode == e1000_fc_none ||
++ hw->fc.requested_mode == e1000_fc_rx_pause)))
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+- wr32(E1000_SRRCTL(reg_idx), srrctl);
++ E1000_WRITE_REG(hw, E1000_SRRCTL(reg_idx), srrctl);
+
+ /* set filtering for VMDQ pools */
+ igb_set_vmolr(adapter, reg_idx & 0x7, true);
+@@ -3670,14 +4120,14 @@
+
+ /* enable receive descriptor fetching */
+ rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+- wr32(E1000_RXDCTL(reg_idx), rxdctl);
++ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), rxdctl);
+ }
+
+ /**
+- * igb_configure_rx - Configure receive Unit after Reset
+- * @adapter: board private structure
++ * igb_configure_rx - Configure receive Unit after Reset
++ * @adapter: board private structure
+ *
+- * Configure the Rx unit of the MAC after a reset.
++ * Configure the Rx unit of the MAC after a reset.
+ **/
+ static void igb_configure_rx(struct igb_adapter *adapter)
+ {
+@@ -3698,10 +4148,10 @@
+ }
+
+ /**
+- * igb_free_tx_resources - Free Tx Resources per Queue
+- * @tx_ring: Tx descriptor ring for a specific queue
++ * igb_free_tx_resources - Free Tx Resources per Queue
++ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+- * Free all transmit software resources
++ * Free all transmit software resources
+ **/
+ void igb_free_tx_resources(struct igb_ring *tx_ring)
+ {
+@@ -3721,10 +4171,10 @@
+ }
+
+ /**
+- * igb_free_all_tx_resources - Free Tx Resources for All Queues
+- * @adapter: board private structure
++ * igb_free_all_tx_resources - Free Tx Resources for All Queues
++ * @adapter: board private structure
+ *
+- * Free all transmit software resources
++ * Free all transmit software resources
+ **/
+ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
+ {
+@@ -3746,9 +4196,9 @@
+ DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(ring->dev,
+- dma_unmap_addr(tx_buffer, dma),
+- dma_unmap_len(tx_buffer, len),
+- DMA_TO_DEVICE);
++ dma_unmap_addr(tx_buffer, dma),
++ dma_unmap_len(tx_buffer, len),
++ DMA_TO_DEVICE);
+ }
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+@@ -3757,8 +4207,8 @@
+ }
+
+ /**
+- * igb_clean_tx_ring - Free Tx Buffers
+- * @tx_ring: ring to be cleaned
++ * igb_clean_tx_ring - Free Tx Buffers
++ * @tx_ring: ring to be cleaned
+ **/
+ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
+ {
+@@ -3788,8 +4238,8 @@
+ }
+
+ /**
+- * igb_clean_all_tx_rings - Free Tx Buffers for all queues
+- * @adapter: board private structure
++ * igb_clean_all_tx_rings - Free Tx Buffers for all queues
++ * @adapter: board private structure
+ **/
+ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
+ {
+@@ -3800,10 +4250,10 @@
+ }
+
+ /**
+- * igb_free_rx_resources - Free Rx Resources
+- * @rx_ring: ring to clean the resources from
++ * igb_free_rx_resources - Free Rx Resources
++ * @rx_ring: ring to clean the resources from
+ *
+- * Free all receive software resources
++ * Free all receive software resources
+ **/
+ void igb_free_rx_resources(struct igb_ring *rx_ring)
+ {
+@@ -3823,10 +4273,10 @@
+ }
+
+ /**
+- * igb_free_all_rx_resources - Free Rx Resources for All Queues
+- * @adapter: board private structure
++ * igb_free_all_rx_resources - Free Rx Resources for All Queues
++ * @adapter: board private structure
+ *
+- * Free all receive software resources
++ * Free all receive software resources
+ **/
+ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
+ {
+@@ -3837,25 +4287,40 @@
+ }
+
+ /**
+- * igb_clean_rx_ring - Free Rx Buffers per Queue
+- * @rx_ring: ring to free buffers from
++ * igb_clean_rx_ring - Free Rx Buffers per Queue
++ * @rx_ring: ring to free buffers from
+ **/
+-static void igb_clean_rx_ring(struct igb_ring *rx_ring)
++void igb_clean_rx_ring(struct igb_ring *rx_ring)
+ {
+ unsigned long size;
+ u16 i;
+
++ if (!rx_ring->rx_buffer_info)
++ return;
++
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ if (rx_ring->skb)
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+
+- if (!rx_ring->rx_buffer_info)
+- return;
+-
++#endif
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ if (buffer_info->dma) {
++ dma_unmap_single(rx_ring->dev,
++ buffer_info->dma,
++ rx_ring->rx_buffer_len,
++ DMA_FROM_DEVICE);
++ buffer_info->dma = 0;
++ }
+
++ if (buffer_info->skb) {
++ dev_kfree_skb(buffer_info->skb);
++ buffer_info->skb = NULL;
++ }
++#else
+ if (!buffer_info->page)
+ continue;
+
+@@ -3866,6 +4331,7 @@
+ __free_page(buffer_info->page);
+
+ buffer_info->page = NULL;
++#endif
+ }
+
+ size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+@@ -3880,8 +4346,8 @@
+ }
+
+ /**
+- * igb_clean_all_rx_rings - Free Rx Buffers for all queues
+- * @adapter: board private structure
++ * igb_clean_all_rx_rings - Free Rx Buffers for all queues
++ * @adapter: board private structure
+ **/
+ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
+ {
+@@ -3892,11 +4358,11 @@
+ }
+
+ /**
+- * igb_set_mac - Change the Ethernet Address of the NIC
+- * @netdev: network interface device structure
+- * @p: pointer to an address structure
++ * igb_set_mac - Change the Ethernet Address of the NIC
++ * @netdev: network interface device structure
++ * @p: pointer to an address structure
+ *
+- * Returns 0 on success, negative on failure
++ * Returns 0 on success, negative on failure
+ **/
+ static int igb_set_mac(struct net_device *netdev, void *p)
+ {
+@@ -3910,60 +4376,155 @@
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+- /* set the correct pool for the new PF MAC address in entry 0 */
+- igb_rar_set_qsel(adapter, hw->mac.addr, 0,
+- adapter->vfs_allocated_count);
++ /* set the correct pool for the new PF MAC address in entry 0 */
++ igb_rar_set_qsel(adapter, hw->mac.addr, 0,
++ adapter->vfs_allocated_count);
++
++ return 0;
++}
++
++/**
++ * igb_write_mc_addr_list - write multicast addresses to MTA
++ * @netdev: network interface device structure
++ *
++ * Writes multicast address list to the MTA hash table.
++ * Returns: -ENOMEM on failure
++ * 0 on no addresses written
++ * X on writing X addresses to MTA
++ **/
++int igb_write_mc_addr_list(struct net_device *netdev)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++#ifdef NETDEV_HW_ADDR_T_MULTICAST
++ struct netdev_hw_addr *ha;
++#else
++ struct dev_mc_list *ha;
++#endif
++ u8 *mta_list;
++ int i, count;
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++ int vm;
++#endif
++ count = netdev_mc_count(netdev);
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++ for (vm = 1; vm < adapter->vmdq_pools; vm++) {
++ if (!adapter->vmdq_netdev[vm])
++ break;
++ if (!netif_running(adapter->vmdq_netdev[vm]))
++ continue;
++ count += netdev_mc_count(adapter->vmdq_netdev[vm]);
++ }
++#endif
++
++ if (!count) {
++ e1000_update_mc_addr_list(hw, NULL, 0);
++ return 0;
++ }
++ mta_list = kzalloc(count * 6, GFP_ATOMIC);
++ if (!mta_list)
++ return -ENOMEM;
++
++ /* The shared function expects a packed array of only addresses. */
++ i = 0;
++ netdev_for_each_mc_addr(ha, netdev)
++#ifdef NETDEV_HW_ADDR_T_MULTICAST
++ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
++#else
++ memcpy(mta_list + (i++ * ETH_ALEN), ha->dmi_addr, ETH_ALEN);
++#endif
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++ for (vm = 1; vm < adapter->vmdq_pools; vm++) {
++ if (!adapter->vmdq_netdev[vm])
++ break;
++ if (!netif_running(adapter->vmdq_netdev[vm]) ||
++ !netdev_mc_count(adapter->vmdq_netdev[vm]))
++ continue;
++ netdev_for_each_mc_addr(ha, adapter->vmdq_netdev[vm])
++#ifdef NETDEV_HW_ADDR_T_MULTICAST
++ memcpy(mta_list + (i++ * ETH_ALEN),
++ ha->addr, ETH_ALEN);
++#else
++ memcpy(mta_list + (i++ * ETH_ALEN),
++ ha->dmi_addr, ETH_ALEN);
++#endif
++ }
++#endif
++ e1000_update_mc_addr_list(hw, mta_list, i);
++ kfree(mta_list);
++
++ return count;
++}
++
++void igb_full_sync_mac_table(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ int i;
+
+- return 0;
++ for (i = 0; i < hw->mac.rar_entry_count; i++)
++ igb_rar_set(adapter, i);
+ }
+
+-/**
+- * igb_write_mc_addr_list - write multicast addresses to MTA
+- * @netdev: network interface device structure
+- *
+- * Writes multicast address list to the MTA hash table.
+- * Returns: -ENOMEM on failure
+- * 0 on no addresses written
+- * X on writing X addresses to MTA
+- **/
+-static int igb_write_mc_addr_list(struct net_device *netdev)
++void igb_sync_mac_table(struct igb_adapter *adapter)
+ {
+- struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+- struct netdev_hw_addr *ha;
+- u8 *mta_list;
+ int i;
+
+- if (netdev_mc_empty(netdev)) {
+- /* nothing to program, so clear mc list */
+- igb_update_mc_addr_list(hw, NULL, 0);
+- igb_restore_vf_multicasts(adapter);
+- return 0;
++ for (i = 0; i < hw->mac.rar_entry_count; i++) {
++ if (adapter->mac_table[i].state & IGB_MAC_STATE_MODIFIED)
++ igb_rar_set(adapter, i);
++ adapter->mac_table[i].state &= ~(IGB_MAC_STATE_MODIFIED);
+ }
++}
+
+- mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
+- if (!mta_list)
+- return -ENOMEM;
++int igb_available_rars(struct igb_adapter *adapter)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ int i, count = 0;
+
+- /* The shared function expects a packed array of only addresses. */
+- i = 0;
+- netdev_for_each_mc_addr(ha, netdev)
+- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
++ for (i = 0; i < hw->mac.rar_entry_count; i++) {
++ if (adapter->mac_table[i].state == 0)
++ count++;
++ }
++ return count;
++}
+
+- igb_update_mc_addr_list(hw, mta_list, i);
+- kfree(mta_list);
++static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
++ u8 qsel)
++{
++ u32 rar_low, rar_high;
++ struct e1000_hw *hw = &adapter->hw;
++
++ /* HW expects these in little endian so we reverse the byte order
++ * from network order (big endian) to little endian
++ */
++ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
++ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
++ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
++
++ /* Indicate to hardware the Address is Valid. */
++ rar_high |= E1000_RAH_AV;
++
++ if (hw->mac.type == e1000_82575)
++ rar_high |= E1000_RAH_POOL_1 * qsel;
++ else
++ rar_high |= E1000_RAH_POOL_1 << qsel;
+
+- return netdev_mc_count(netdev);
++ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
++ E1000_WRITE_FLUSH(hw);
++ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
++ E1000_WRITE_FLUSH(hw);
+ }
+
++#ifdef HAVE_SET_RX_MODE
+ /**
+- * igb_write_uc_addr_list - write unicast addresses to RAR table
+- * @netdev: network interface device structure
++ * igb_write_uc_addr_list - write unicast addresses to RAR table
++ * @netdev: network interface device structure
+ *
+- * Writes unicast address list to the RAR table.
+- * Returns: -ENOMEM on failure/insufficient address space
+- * 0 on no addresses written
+- * X on writing X addresses to the RAR table
++ * Writes unicast address list to the RAR table.
++ * Returns: -ENOMEM on failure/insufficient address space
++ * 0 on no addresses written
++ * X on writing X addresses to the RAR table
+ **/
+ static int igb_write_uc_addr_list(struct net_device *netdev)
+ {
+@@ -3974,39 +4535,48 @@
+ int count = 0;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+- if (netdev_uc_count(netdev) > rar_entries)
++ if (netdev_uc_count(netdev) > igb_available_rars(adapter))
+ return -ENOMEM;
+-
+ if (!netdev_uc_empty(netdev) && rar_entries) {
++#ifdef NETDEV_HW_ADDR_T_UNICAST
+ struct netdev_hw_addr *ha;
+-
++#else
++ struct dev_mc_list *ha;
++#endif
+ netdev_for_each_uc_addr(ha, netdev) {
++#ifdef NETDEV_HW_ADDR_T_UNICAST
+ if (!rar_entries)
+ break;
+ igb_rar_set_qsel(adapter, ha->addr,
+ rar_entries--,
+ vfn);
++#else
++ igb_rar_set_qsel(adapter, ha->da_addr,
++ rar_entries--,
++ vfn);
++#endif
+ count++;
+ }
+ }
++
+ /* write the addresses in reverse order to avoid write combining */
+ for (; rar_entries > 0 ; rar_entries--) {
+- wr32(E1000_RAH(rar_entries), 0);
+- wr32(E1000_RAL(rar_entries), 0);
++ E1000_WRITE_REG(hw, E1000_RAH(rar_entries), 0);
++ E1000_WRITE_REG(hw, E1000_RAL(rar_entries), 0);
+ }
+- wrfl();
+-
++ E1000_WRITE_FLUSH(hw);
+ return count;
+ }
+
++#endif /* HAVE_SET_RX_MODE */
+ /**
+- * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+- * @netdev: network interface device structure
++ * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
++ * @netdev: network interface device structure
+ *
+- * The set_rx_mode entry point is called whenever the unicast or multicast
+- * address lists or the network interface flags are updated. This routine is
+- * responsible for configuring the hardware for proper unicast, multicast,
+- * promiscuous mode, and all-multi behavior.
++ * The set_rx_mode entry point is called whenever the unicast or multicast
++ * address lists or the network interface flags are updated. This routine is
++ * responsible for configuring the hardware for proper unicast, multicast,
++ * promiscuous mode, and all-multi behavior.
+ **/
+ static void igb_set_rx_mode(struct net_device *netdev)
+ {
+@@ -4017,23 +4587,24 @@
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+- rctl = rd32(E1000_RCTL);
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ /* clear the effected bits */
+ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
+
+ if (netdev->flags & IFF_PROMISC) {
+- /* retain VLAN HW filtering if in VT mode */
+- if (adapter->vfs_allocated_count)
+- rctl |= E1000_RCTL_VFE;
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
++ /* retain VLAN HW filtering if in VT mode */
++ if (adapter->vfs_allocated_count || adapter->vmdq_pools)
++ rctl |= E1000_RCTL_VFE;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= E1000_RCTL_MPE;
+ vmolr |= E1000_VMOLR_MPME;
+ } else {
+- /* Write addresses to the MTA, if the attempt fails
++ /*
++ * Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+@@ -4045,7 +4616,9 @@
+ vmolr |= E1000_VMOLR_ROMPE;
+ }
+ }
+- /* Write addresses to available RAR registers, if there is not
++#ifdef HAVE_SET_RX_MODE
++ /*
++ * Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
+ */
+@@ -4054,21 +4627,23 @@
+ rctl |= E1000_RCTL_UPE;
+ vmolr |= E1000_VMOLR_ROPE;
+ }
++#endif /* HAVE_SET_RX_MODE */
+ rctl |= E1000_RCTL_VFE;
+ }
+- wr32(E1000_RCTL, rctl);
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+- /* In order to support SR-IOV and eventually VMDq it is necessary to set
++ /*
++ * In order to support SR-IOV and eventually VMDq it is necessary to set
+ * the VMOLR to enable the appropriate modes. Without this workaround
+ * we will have issues with VLAN tag stripping not being done for frames
+ * that are only arriving because we are the default pool
+ */
+- if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
++ if (hw->mac.type < e1000_82576)
+ return;
+
+- vmolr |= rd32(E1000_VMOLR(vfn)) &
+- ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
+- wr32(E1000_VMOLR(vfn), vmolr);
++ vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) &
++ ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
++ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
+ igb_restore_vf_multicasts(adapter);
+ }
+
+@@ -4080,7 +4655,7 @@
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+- wvbr = rd32(E1000_WVBR);
++ wvbr = E1000_READ_REG(hw, E1000_WVBR);
+ if (!wvbr)
+ return;
+ break;
+@@ -4100,15 +4675,34 @@
+ if (!adapter->wvbr)
+ return;
+
+- for (j = 0; j < adapter->vfs_allocated_count; j++) {
+- if (adapter->wvbr & (1 << j) ||
+- adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
+- dev_warn(&adapter->pdev->dev,
+- "Spoof event(s) detected on VF %d\n", j);
+- adapter->wvbr &=
+- ~((1 << j) |
+- (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
++ switch (adapter->hw.mac.type) {
++ case e1000_82576:
++ for (j = 0; j < adapter->vfs_allocated_count; j++) {
++ if (adapter->wvbr & (1 << j) ||
++ adapter->wvbr & (1 << (j
++ + IGB_STAGGERED_QUEUE_OFFSET))) {
++ DPRINTK(DRV, WARNING,
++ "Spoof event(s) detected on VF %d\n",
++ j);
++ adapter->wvbr &=
++ ~((1 << j) |
++ (1 << (j +
++ IGB_STAGGERED_QUEUE_OFFSET)));
++ }
++ }
++ break;
++ case e1000_i350:
++ for (j = 0; j < adapter->vfs_allocated_count; j++) {
++ if (adapter->wvbr & (1 << j)) {
++ DPRINTK(DRV, WARNING,
++ "Spoof event(s) detected on VF %d\n",
++ j);
++ adapter->wvbr &= ~(1 << j);
++ }
+ }
++ break;
++ default:
++ break;
+ }
+ }
+
+@@ -4118,21 +4712,22 @@
+ static void igb_update_phy_info(unsigned long data)
+ {
+ struct igb_adapter *adapter = (struct igb_adapter *) data;
+- igb_get_phy_info(&adapter->hw);
++
++ e1000_get_phy_info(&adapter->hw);
+ }
+
+ /**
+- * igb_has_link - check shared code for link and determine up/down
+- * @adapter: pointer to driver private info
++ * igb_has_link - check shared code for link and determine up/down
++ * @adapter: pointer to driver private info
+ **/
+ bool igb_has_link(struct igb_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+- bool link_active = false;
++ bool link_active = FALSE;
+
+ /* get_link_status is set on LSC (link status) interrupt or
+ * rx sequence error interrupt. get_link_status will stay
+- * false until the e1000_check_for_link establishes link
++ * false until the igb_e1000_check_for_link establishes link
+ * for copper adapters ONLY
+ */
+ switch (hw->phy.media_type) {
+@@ -4140,11 +4735,11 @@
+ if (!hw->mac.get_link_status)
+ return true;
+ case e1000_media_type_internal_serdes:
+- hw->mac.ops.check_for_link(hw);
++ igb_e1000_check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
+ break;
+- default:
+ case e1000_media_type_unknown:
++ default:
+ break;
+ }
+
+@@ -4162,27 +4757,9 @@
+ return link_active;
+ }
+
+-static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
+-{
+- bool ret = false;
+- u32 ctrl_ext, thstat;
+-
+- /* check for thermal sensor event on i350 copper only */
+- if (hw->mac.type == e1000_i350) {
+- thstat = rd32(E1000_THSTAT);
+- ctrl_ext = rd32(E1000_CTRL_EXT);
+-
+- if ((hw->phy.media_type == e1000_media_type_copper) &&
+- !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
+- ret = !!(thstat & event);
+- }
+-
+- return ret;
+-}
+-
+ /**
+- * igb_watchdog - Timer Call-back
+- * @data: pointer to adapter cast into an unsigned long
++ * igb_watchdog - Timer Call-back
++ * @data: pointer to adapter cast into an unsigned long
+ **/
+ static void igb_watchdog(unsigned long data)
+ {
+@@ -4197,29 +4774,28 @@
+ struct igb_adapter,
+ watchdog_task);
+ struct e1000_hw *hw = &adapter->hw;
+- struct e1000_phy_info *phy = &hw->phy;
+ struct net_device *netdev = adapter->netdev;
+- u32 link;
++ u32 thstat, ctrl_ext, link;
+ int i;
+ u32 connsw;
+
+ link = igb_has_link(adapter);
+
+- if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
+- if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
+- adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+- else
+- link = false;
+- }
+-
+ /* Force link down if we have fiber to swap to */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (hw->phy.media_type == e1000_media_type_copper) {
+- connsw = rd32(E1000_CONNSW);
++ connsw = E1000_READ_REG(hw, E1000_CONNSW);
+ if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
+ link = 0;
+ }
+ }
++ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
++ if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
++ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
++ else
++ link = FALSE;
++ }
++
+ if (link) {
+ /* Perform a reset if the media type changed. */
+ if (hw->dev_spec._82575.media_changed) {
+@@ -4227,48 +4803,29 @@
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ igb_reset(adapter);
+ }
++
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
+ if (!netif_carrier_ok(netdev)) {
+ u32 ctrl;
+
+- hw->mac.ops.get_speed_and_duplex(hw,
+- &adapter->link_speed,
+- &adapter->link_duplex);
++ igb_e1000_get_speed_and_duplex(hw,
++ &adapter->link_speed,
++ &adapter->link_duplex);
+
+- ctrl = rd32(E1000_CTRL);
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ /* Links status message must follow this format */
+ netdev_info(netdev,
+- "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+- netdev->name,
+- adapter->link_speed,
+- adapter->link_duplex == FULL_DUPLEX ?
+- "Full" : "Half",
+- (ctrl & E1000_CTRL_TFCE) &&
+- (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
+- (ctrl & E1000_CTRL_RFCE) ? "RX" :
+- (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
+-
+- /* disable EEE if enabled */
+- if ((adapter->flags & IGB_FLAG_EEE) &&
+- (adapter->link_duplex == HALF_DUPLEX)) {
+- dev_info(&adapter->pdev->dev,
+- "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
+- adapter->hw.dev_spec._82575.eee_disable = true;
+- adapter->flags &= ~IGB_FLAG_EEE;
+- }
+-
+- /* check if SmartSpeed worked */
+- igb_check_downshift(hw);
+- if (phy->speed_downgraded)
+- netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
+-
+- /* check for thermal sensor event */
+- if (igb_thermal_sensor_event(hw,
+- E1000_THSTAT_LINK_THROTTLE))
+- netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
+-
++ "igb: %s NIC Link is Up %d Mbps %s, Flow Control: %s\n",
++ netdev->name,
++ adapter->link_speed,
++ adapter->link_duplex == FULL_DUPLEX ?
++ "Full Duplex" : "Half Duplex",
++ ((ctrl & E1000_CTRL_TFCE) &&
++ (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
++ ((ctrl & E1000_CTRL_RFCE) ? "RX" :
++ ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
+ /* adjust timeout factor according to speed/duplex */
+ adapter->tx_timeout_factor = 1;
+ switch (adapter->link_speed) {
+@@ -4278,12 +4835,17 @@
+ case SPEED_100:
+ /* maybe add some timeout factor ? */
+ break;
++ default:
++ break;
+ }
+
+ netif_carrier_on(netdev);
++ netif_tx_wake_all_queues(netdev);
+
+ igb_ping_all_vfs(adapter);
++#ifdef IFLA_VF_MAX
+ igb_check_vf_rate_limit(adapter);
++#endif /* IFLA_VF_MAX */
+
+ /* link state has changed, schedule phy info update */
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+@@ -4294,17 +4856,33 @@
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+-
+- /* check for thermal sensor event */
+- if (igb_thermal_sensor_event(hw,
+- E1000_THSTAT_PWR_DOWN)) {
+- netdev_err(netdev, "The network adapter was stopped because it overheated\n");
++ /* check for thermal sensor event on i350 */
++ if (hw->mac.type == e1000_i350) {
++ thstat = E1000_READ_REG(hw, E1000_THSTAT);
++ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
++ if ((hw->phy.media_type ==
++ e1000_media_type_copper) &&
++ !(ctrl_ext &
++ E1000_CTRL_EXT_LINK_MODE_SGMII)) {
++ if (thstat & E1000_THSTAT_PWR_DOWN) {
++ netdev_err(netdev,
++ "igb: %s The network adapter was stopped because it overheated.\n",
++ netdev->name);
++ }
++ if (thstat &
++ E1000_THSTAT_LINK_THROTTLE) {
++ netdev_err(netdev,
++ "igb: %s The network adapter supported link speed was downshifted because it overheated.\n",
++ netdev->name);
++ }
++ }
+ }
+
+ /* Links status message must follow this format */
+ netdev_info(netdev, "igb: %s NIC Link is Down\n",
+ netdev->name);
+ netif_carrier_off(netdev);
++ netif_tx_stop_all_queues(netdev);
+
+ igb_ping_all_vfs(adapter);
+
+@@ -4312,7 +4890,6 @@
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+-
+ /* link is down, time to check for alternate media */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ igb_check_swap_media(adapter);
+@@ -4328,6 +4905,7 @@
+ /* also check for alternate media here */
+ } else if (!netif_carrier_ok(netdev) &&
+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
++ hw->mac.ops.power_up_serdes(hw);
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+@@ -4337,12 +4915,11 @@
+ }
+ }
+
+- spin_lock(&adapter->stats64_lock);
+- igb_update_stats(adapter, &adapter->stats64);
+- spin_unlock(&adapter->stats64_lock);
++ igb_update_stats(adapter);
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igb_ring *tx_ring = adapter->tx_ring[i];
++
+ if (!netif_carrier_ok(netdev)) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+@@ -4361,19 +4938,18 @@
+ set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+ }
+
+- /* Cause software interrupt to ensure Rx ring is cleaned */
+- if (adapter->flags & IGB_FLAG_HAS_MSIX) {
++ /* Cause software interrupt to ensure rx ring is cleaned */
++ if (adapter->msix_entries) {
+ u32 eics = 0;
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ eics |= adapter->q_vector[i]->eims_value;
+- wr32(E1000_EICS, eics);
++ E1000_WRITE_REG(hw, E1000_EICS, eics);
+ } else {
+- wr32(E1000_ICS, E1000_ICS_RXDMT0);
++ E1000_WRITE_REG(hw, E1000_ICS, E1000_ICS_RXDMT0);
+ }
+
+ igb_spoof_check(adapter);
+- igb_ptp_rx_hang(adapter);
+
+ /* Reset the timer */
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+@@ -4386,6 +4962,70 @@
+ }
+ }
+
++static void igb_dma_err_task(struct work_struct *work)
++{
++ struct igb_adapter *adapter = container_of(work,
++ struct igb_adapter,
++ dma_err_task);
++ int vf;
++ struct e1000_hw *hw = &adapter->hw;
++ struct net_device *netdev = adapter->netdev;
++ u32 hgptc;
++ u32 ciaa, ciad;
++
++ hgptc = E1000_READ_REG(hw, E1000_HGPTC);
++ if (hgptc) /* If incrementing then no need for the check below */
++ goto dma_timer_reset;
++ /*
++ * Check to see if a bad DMA write target from an errant or
++ * malicious VF has caused a PCIe error. If so then we can
++ * issue a VFLR to the offending VF(s) and then resume without
++ * requesting a full slot reset.
++ */
++
++ for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
++ ciaa = (vf << 16) | 0x80000000;
++ /* 32 bit read so align, we really want status at offset 6 */
++ ciaa |= PCI_COMMAND;
++ E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
++ ciad = E1000_READ_REG(hw, E1000_CIAD);
++ ciaa &= 0x7FFFFFFF;
++ /* disable debug mode asap after reading data */
++ E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
++ /* Get the upper 16 bits which will be the PCI status reg */
++ ciad >>= 16;
++ if (ciad & (PCI_STATUS_REC_MASTER_ABORT |
++ PCI_STATUS_REC_TARGET_ABORT |
++ PCI_STATUS_SIG_SYSTEM_ERROR)) {
++ netdev_err(netdev, "VF %d suffered error\n", vf);
++ /* Issue VFLR */
++ ciaa = (vf << 16) | 0x80000000;
++ ciaa |= 0xA8;
++ E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
++ ciad = 0x00008000; /* VFLR */
++ E1000_WRITE_REG(hw, E1000_CIAD, ciad);
++ ciaa &= 0x7FFFFFFF;
++ E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
++ }
++ }
++dma_timer_reset:
++ /* Reset the timer */
++ if (!test_bit(__IGB_DOWN, &adapter->state))
++ mod_timer(&adapter->dma_err_timer,
++ round_jiffies(jiffies + HZ / 10));
++}
++
++/**
++ * igb_dma_err_timer - Timer Call-back
++ * @data: pointer to adapter cast into an unsigned long
++ **/
++static void igb_dma_err_timer(unsigned long data)
++{
++ struct igb_adapter *adapter = (struct igb_adapter *)data;
++ /* Do the rest outside of interrupt context */
++ schedule_work(&adapter->dma_err_task);
++}
++
+ enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+@@ -4394,19 +5034,20 @@
+ };
+
+ /**
+- * igb_update_ring_itr - update the dynamic ITR value based on packet size
+- * @q_vector: pointer to q_vector
++ * igb_update_ring_itr - update the dynamic ITR value based on packet size
+ *
+- * Stores a new ITR value based on strictly on packet size. This
+- * algorithm is less sophisticated than that used in igb_update_itr,
+- * due to the difficulty of synchronizing statistics across multiple
+- * receive rings. The divisors and thresholds used by this function
+- * were determined based on theoretical maximum wire speed and testing
+- * data, in order to minimize response time while increasing bulk
+- * throughput.
+- * This functionality is controlled by ethtool's coalescing settings.
+- * NOTE: This function is called only when operating in a multiqueue
+- * receive environment.
++ * Stores a new ITR value based on strictly on packet size. This
++ * algorithm is less sophisticated than that used in igb_update_itr,
++ * due to the difficulty of synchronizing statistics across multiple
++ * receive rings. The divisors and thresholds used by this function
++ * were determined based on theoretical maximum wire speed and testing
++ * data, in order to minimize response time while increasing bulk
++ * throughput.
++ * This functionality is controlled by the InterruptThrottleRate module
++ * parameter (see igb_param.c)
++ * NOTE: This function is called only when operating in a multiqueue
++ * receive environment.
++ * @q_vector: pointer to q_vector
+ **/
+ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
+ {
+@@ -4418,9 +5059,13 @@
+ /* For non-gigabit speeds, just fix the interrupt rate at 4000
+ * ints/sec - ITR timer value of 120 ticks.
+ */
+- if (adapter->link_speed != SPEED_1000) {
++ switch (adapter->link_speed) {
++ case SPEED_10:
++ case SPEED_100:
+ new_val = IGB_4K_ITR;
+ goto set_itr_val;
++ default:
++ break;
+ }
+
+ packets = q_vector->rx.total_packets;
+@@ -4467,20 +5112,20 @@
+ }
+
+ /**
+- * igb_update_itr - update the dynamic ITR value based on statistics
+- * @q_vector: pointer to q_vector
+- * @ring_container: ring info to update the itr for
+- *
+- * Stores a new ITR value based on packets and byte
+- * counts during the last interrupt. The advantage of per interrupt
+- * computation is faster updates and more accurate ITR for the current
+- * traffic pattern. Constants in this function were computed
+- * based on theoretical maximum wire speed and thresholds were set based
+- * on testing data as well as attempting to minimize response time
+- * while increasing bulk throughput.
+- * This functionality is controlled by ethtool's coalescing settings.
+- * NOTE: These calculations are only valid when operating in a single-
+- * queue environment.
++ * igb_update_itr - update the dynamic ITR value based on statistics
++ * Stores a new ITR value based on packets and byte
++ * counts during the last interrupt. The advantage of per interrupt
++ * computation is faster updates and more accurate ITR for the current
++ * traffic pattern. Constants in this function were computed
++ * based on theoretical maximum wire speed and thresholds were set based
++ * on testing data as well as attempting to minimize response time
++ * while increasing bulk throughput.
++ * this functionality is controlled by the InterruptThrottleRate module
++ * parameter (see igb_param.c)
++ * NOTE: These calculations are only valid when operating in a single-
++ * queue environment.
++ * @q_vector: pointer to q_vector
++ * @ring_container: ring info to update the itr for
+ **/
+ static void igb_update_itr(struct igb_q_vector *q_vector,
+ struct igb_ring_container *ring_container)
+@@ -4504,12 +5149,13 @@
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* this if handles the TSO accounting */
+- if (bytes/packets > 8000)
++ if (bytes/packets > 8000) {
+ itrval = bulk_latency;
+- else if ((packets < 10) || ((bytes/packets) > 1200))
++ } else if ((packets < 10) || ((bytes/packets) > 1200)) {
+ itrval = bulk_latency;
+- else if ((packets > 35))
++ } else if ((packets > 35)) {
+ itrval = lowest_latency;
++ }
+ } else if (bytes/packets > 2000) {
+ itrval = bulk_latency;
+ } else if (packets <= 2 && bytes < 512) {
+@@ -4541,10 +5187,14 @@
+ u8 current_itr = 0;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+- if (adapter->link_speed != SPEED_1000) {
++ switch (adapter->link_speed) {
++ case SPEED_10:
++ case SPEED_100:
+ current_itr = 0;
+ new_itr = IGB_4K_ITR;
+ goto set_itr_now;
++ default:
++ break;
+ }
+
+ igb_update_itr(q_vector, &q_vector->tx);
+@@ -4580,9 +5230,9 @@
+ * increasing
+ */
+ new_itr = new_itr > q_vector->itr_val ?
+- max((new_itr * q_vector->itr_val) /
+- (new_itr + (q_vector->itr_val >> 2)),
+- new_itr) : new_itr;
++ max((new_itr * q_vector->itr_val) /
++ (new_itr + (q_vector->itr_val >> 2)),
++ new_itr) : new_itr;
+ /* Don't write the value here; it resets the adapter's
+ * internal timer, and causes us to delay far longer than
+ * we should between interrupts. Instead, we write the ITR
+@@ -4594,8 +5244,8 @@
+ }
+ }
+
+-static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
+- u32 type_tucmd, u32 mss_l4len_idx)
++void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
++ u32 type_tucmd, u32 mss_l4len_idx)
+ {
+ struct e1000_adv_tx_context_desc *context_desc;
+ u16 i = tx_ring->next_to_use;
+@@ -4622,26 +5272,32 @@
+ struct igb_tx_buffer *first,
+ u8 *hdr_len)
+ {
++#ifdef NETIF_F_TSO
+ struct sk_buff *skb = first->skb;
+ u32 vlan_macip_lens, type_tucmd;
+ u32 mss_l4len_idx, l4len;
+- int err;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
++#endif /* NETIF_F_TSO */
+ return 0;
++#ifdef NETIF_F_TSO
+
+- err = skb_cow_head(skb, 0);
+- if (err < 0)
+- return err;
++ if (skb_header_cloned(skb)) {
++ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
++
++ if (err)
++ return err;
++ }
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
+
+ if (first->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
++
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+@@ -4652,6 +5308,7 @@
+ first->tx_flags |= IGB_TX_FLAGS_TSO |
+ IGB_TX_FLAGS_CSUM |
+ IGB_TX_FLAGS_IPV4;
++#ifdef NETIF_F_TSO6
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+@@ -4659,6 +5316,7 @@
+ 0, IPPROTO_TCP, 0);
+ first->tx_flags |= IGB_TX_FLAGS_TSO |
+ IGB_TX_FLAGS_CSUM;
++#endif
+ }
+
+ /* compute header lengths */
+@@ -4681,6 +5339,7 @@
+ igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
+
+ return 1;
++#endif /* NETIF_F_TSO */
+ }
+
+ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
+@@ -4694,38 +5353,42 @@
+ if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
+ return;
+ } else {
+- u8 l4_hdr = 0;
++ u8 nexthdr = 0;
+
+ switch (first->protocol) {
+- case htons(ETH_P_IP):
++ case __constant_htons(ETH_P_IP):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+- l4_hdr = ip_hdr(skb)->protocol;
++ nexthdr = ip_hdr(skb)->protocol;
+ break;
+- case htons(ETH_P_IPV6):
++#ifdef NETIF_F_IPV6_CSUM
++ case __constant_htons(ETH_P_IPV6):
+ vlan_macip_lens |= skb_network_header_len(skb);
+- l4_hdr = ipv6_hdr(skb)->nexthdr;
++ nexthdr = ipv6_hdr(skb)->nexthdr;
+ break;
++#endif
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+- "partial checksum but proto=%x!\n",
+- first->protocol);
++ "partial checksum but proto=%x!\n",
++ first->protocol);
+ }
+ break;
+ }
+
+- switch (l4_hdr) {
++ switch (nexthdr) {
+ case IPPROTO_TCP:
+ type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ mss_l4len_idx = tcp_hdrlen(skb) <<
+ E1000_ADVTXD_L4LEN_SHIFT;
+ break;
++#ifdef HAVE_SCTP
+ case IPPROTO_SCTP:
+ type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
+ mss_l4len_idx = sizeof(struct sctphdr) <<
+ E1000_ADVTXD_L4LEN_SHIFT;
+ break;
++#endif
+ case IPPROTO_UDP:
+ mss_l4len_idx = sizeof(struct udphdr) <<
+ E1000_ADVTXD_L4LEN_SHIFT;
+@@ -4733,8 +5396,8 @@
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+- "partial checksum but l4 proto=%x!\n",
+- l4_hdr);
++ "partial checksum but l4 proto=%x!\n",
++ nexthdr);
+ }
+ break;
+ }
+@@ -4773,9 +5436,6 @@
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
+ (E1000_ADVTXD_MAC_TSTAMP));
+
+- /* insert frame checksum */
+- cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
+-
+ return cmd_type;
+ }
+
+@@ -4882,11 +5542,11 @@
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+-
+ /* set the timestamp */
+ first->time_stamp = jiffies;
+
+- /* Force memory writes to complete before letting h/w know there
++ /*
++ * Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+@@ -4907,7 +5567,7 @@
+ writel(i, tx_ring->tail);
+
+ /* we need this if more than one processor can write to our tail
+- * at a time, it synchronizes IO on IA64/Altix systems
++ * at a time, it syncronizes IO on IA64/Altix systems
+ */
+ mmiowb();
+
+@@ -4932,9 +5592,12 @@
+
+ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+ {
+- struct net_device *netdev = tx_ring->netdev;
++ struct net_device *netdev = netdev_ring(tx_ring);
+
+- netif_stop_subqueue(netdev, tx_ring->queue_index);
++ if (netif_is_multiqueue(netdev))
++ netif_stop_subqueue(netdev, ring_queue_index(tx_ring));
++ else
++ netif_stop_queue(netdev);
+
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+@@ -4949,11 +5612,12 @@
+ return -EBUSY;
+
+ /* A reprieve! */
+- netif_wake_subqueue(netdev, tx_ring->queue_index);
++ if (netif_is_multiqueue(netdev))
++ netif_wake_subqueue(netdev, ring_queue_index(tx_ring));
++ else
++ netif_wake_queue(netdev);
+
+- u64_stats_update_begin(&tx_ring->tx_syncp2);
+- tx_ring->tx_stats.restart_queue2++;
+- u64_stats_update_end(&tx_ring->tx_syncp2);
++ tx_ring->tx_stats.restart_queue++;
+
+ return 0;
+ }
+@@ -4971,25 +5635,26 @@
+ struct igb_tx_buffer *first;
+ int tso;
+ u32 tx_flags = 0;
++#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD
++ unsigned short f;
++#endif
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ __be16 protocol = vlan_get_protocol(skb);
+ u8 hdr_len = 0;
+
+- /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
++ /*
++ * need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+- if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
+- unsigned short f;
+-
+- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+- } else {
+- count += skb_shinfo(skb)->nr_frags;
+- }
+-
++#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD
++ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
++ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
++#else
++ count += skb_shinfo(skb)->nr_frags;
++#endif
+ if (igb_maybe_stop_tx(tx_ring, count + 3)) {
+ /* this is a hard error */
+ return NETDEV_TX_BUSY;
+@@ -5001,12 +5666,21 @@
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
++#ifdef HAVE_PTP_1588_CLOCK
++#ifdef SKB_SHARED_TX_IS_UNION
++ if (unlikely(skb_tx(skb)->hardware)) {
++#else
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
++#endif
+ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
+
+ if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
+ &adapter->state)) {
++#ifdef SKB_SHARED_TX_IS_UNION
++ skb_tx(skb)->in_progress = 1;
++#else
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
++#endif
+ tx_flags |= IGB_TX_FLAGS_TSTAMP;
+
+ adapter->ptp_tx_skb = skb_get(skb);
+@@ -5015,12 +5689,11 @@
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
+-
++#endif /* HAVE_PTP_1588_CLOCK */
+ skb_tx_timestamp(skb);
+-
+- if (vlan_tx_tag_present(skb)) {
++ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= IGB_TX_FLAGS_VLAN;
+- tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
++ tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
+ }
+
+ /* record initial flags and protocol */
+@@ -5035,6 +5708,10 @@
+
+ igb_tx_map(tx_ring, first, hdr_len);
+
++#ifndef HAVE_TRANS_START_IN_QUEUE
++ netdev_ring(tx_ring)->trans_start = jiffies;
++
++#endif
+ /* Make sure there is space in the ring for the next send. */
+ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+@@ -5046,6 +5723,7 @@
+ return NETDEV_TX_OK;
+ }
+
++#ifdef HAVE_TX_MQ
+ static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
+ struct sk_buff *skb)
+ {
+@@ -5056,6 +5734,9 @@
+
+ return adapter->tx_ring[r_idx];
+ }
++#else
++#define igb_tx_queue_mapping(_adapter, _skb) ((_adapter)->tx_ring[0])
++#endif
+
+ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+@@ -5072,22 +5753,22 @@
+ return NETDEV_TX_OK;
+ }
+
+- /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
++ /*
++ * The minimum packet size with TCTL.PSP set is 17 so pad the skb
+ * in order to meet this minimum size requirement.
+ */
+- if (unlikely(skb->len < 17)) {
+- if (skb_pad(skb, 17 - skb->len))
++ if (skb->len < 17) {
++ if (skb_padto(skb, 17))
+ return NETDEV_TX_OK;
+ skb->len = 17;
+- skb_set_tail_pointer(skb, 17);
+ }
+
+ return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
+ }
+
+ /**
+- * igb_tx_timeout - Respond to a Tx Hang
+- * @netdev: network interface device structure
++ * igb_tx_timeout - Respond to a Tx Hang
++ * @netdev: network interface device structure
+ **/
+ static void igb_tx_timeout(struct net_device *netdev)
+ {
+@@ -5101,59 +5782,64 @@
+ hw->dev_spec._82575.global_device_reset = true;
+
+ schedule_work(&adapter->reset_task);
+- wr32(E1000_EICS,
+- (adapter->eims_enable_mask & ~adapter->eims_other));
++ E1000_WRITE_REG(hw, E1000_EICS,
++ (adapter->eims_enable_mask & ~adapter->eims_other));
+ }
+
+ static void igb_reset_task(struct work_struct *work)
+ {
+ struct igb_adapter *adapter;
++
+ adapter = container_of(work, struct igb_adapter, reset_task);
+
+- igb_dump(adapter);
+- netdev_err(adapter->netdev, "Reset adapter\n");
+ igb_reinit_locked(adapter);
+ }
+
+ /**
+- * igb_get_stats64 - Get System Network Statistics
+- * @netdev: network interface device structure
+- * @stats: rtnl_link_stats64 pointer
++ * igb_get_stats - Get System Network Statistics
++ * @netdev: network interface device structure
++ *
++ * Returns the address of the device statistics structure.
++ * The statistics are updated here and also from the timer callback.
+ **/
+-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
+- struct rtnl_link_stats64 *stats)
++static struct net_device_stats *igb_get_stats(struct net_device *netdev)
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+- spin_lock(&adapter->stats64_lock);
+- igb_update_stats(adapter, &adapter->stats64);
+- memcpy(stats, &adapter->stats64, sizeof(*stats));
+- spin_unlock(&adapter->stats64_lock);
++ if (!test_bit(__IGB_RESETTING, &adapter->state))
++ igb_update_stats(adapter);
+
+- return stats;
++#ifdef HAVE_NETDEV_STATS_IN_NETDEV
++ /* only return the current stats */
++ return &netdev->stats;
++#else
++ /* only return the current stats */
++ return &adapter->net_stats;
++#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
+ }
+
+ /**
+- * igb_change_mtu - Change the Maximum Transfer Unit
+- * @netdev: network interface device structure
+- * @new_mtu: new value for maximum frame size
++ * igb_change_mtu - Change the Maximum Transfer Unit
++ * @netdev: network interface device structure
++ * @new_mtu: new value for maximum frame size
+ *
+- * Returns 0 on success, negative on failure
++ * Returns 0 on success, negative on failure
+ **/
+ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+- dev_err(&pdev->dev, "Invalid MTU setting\n");
++ dev_err(pci_dev_to_dev(pdev), "Invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+ #define MAX_STD_JUMBO_FRAME_SIZE 9238
+ if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+- dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
++ dev_err(pci_dev_to_dev(pdev), "MTU > 9216 not supported.\n");
+ return -EINVAL;
+ }
+
+@@ -5170,9 +5856,10 @@
+ if (netif_running(netdev))
+ igb_down(adapter);
+
+- dev_info(&pdev->dev, "changing MTU from %d to %d\n",
++ dev_info(pci_dev_to_dev(pdev), "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
++ hw->dev_spec._82575.mtu = new_mtu;
+
+ if (netif_running(netdev))
+ igb_up(adapter);
+@@ -5185,53 +5872,74 @@
+ }
+
+ /**
+- * igb_update_stats - Update the board statistics counters
+- * @adapter: board private structure
++ * igb_update_stats - Update the board statistics counters
++ * @adapter: board private structure
+ **/
+-void igb_update_stats(struct igb_adapter *adapter,
+- struct rtnl_link_stats64 *net_stats)
++
++void igb_update_stats(struct igb_adapter *adapter)
+ {
++#ifdef HAVE_NETDEV_STATS_IN_NETDEV
++ struct net_device_stats *net_stats = &adapter->netdev->stats;
++#else
++ struct net_device_stats *net_stats = &adapter->net_stats;
++#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
+ struct e1000_hw *hw = &adapter->hw;
++#ifdef HAVE_PCI_ERS
+ struct pci_dev *pdev = adapter->pdev;
++#endif
+ u32 reg, mpc;
+ u16 phy_tmp;
+ int i;
+ u64 bytes, packets;
+- unsigned int start;
+- u64 _bytes, _packets;
++#ifndef IGB_NO_LRO
++ u32 flushed = 0, coal = 0;
++ struct igb_q_vector *q_vector;
++#endif
+
+ #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
+
+- /* Prevent stats update while adapter is being reset, or if the pci
++ /*
++ * Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
++#ifdef HAVE_PCI_ERS
+ if (pci_channel_offline(pdev))
+ return;
+
++#endif
++#ifndef IGB_NO_LRO
++ for (i = 0; i < adapter->num_q_vectors; i++) {
++ q_vector = adapter->q_vector[i];
++ if (!q_vector)
++ continue;
++ flushed += q_vector->lrolist.stats.flushed;
++ coal += q_vector->lrolist.stats.coal;
++ }
++ adapter->lro_stats.flushed = flushed;
++ adapter->lro_stats.coal = coal;
++
++#endif
+ bytes = 0;
+ packets = 0;
+-
+- rcu_read_lock();
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *ring = adapter->rx_ring[i];
+- u32 rqdpc = rd32(E1000_RQDPC(i));
+- if (hw->mac.type >= e1000_i210)
+- wr32(E1000_RQDPC(i), 0);
++ u32 rqdpc_tmp = E1000_READ_REG(hw, E1000_RQDPC(i)) & 0x0FFF;
+
+- if (rqdpc) {
+- ring->rx_stats.drops += rqdpc;
+- net_stats->rx_fifo_errors += rqdpc;
++ if (hw->mac.type >= e1000_i210)
++ E1000_WRITE_REG(hw, E1000_RQDPC(i), 0);
++ ring->rx_stats.drops += rqdpc_tmp;
++ net_stats->rx_fifo_errors += rqdpc_tmp;
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++ if (!ring->vmdq_netdev) {
++ bytes += ring->rx_stats.bytes;
++ packets += ring->rx_stats.packets;
+ }
+-
+- do {
+- start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+- _bytes = ring->rx_stats.bytes;
+- _packets = ring->rx_stats.packets;
+- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+- bytes += _bytes;
+- packets += _packets;
++#else
++ bytes += ring->rx_stats.bytes;
++ packets += ring->rx_stats.packets;
++#endif
+ }
+
+ net_stats->rx_bytes = bytes;
+@@ -5241,98 +5949,98 @@
+ packets = 0;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igb_ring *ring = adapter->tx_ring[i];
+- do {
+- start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+- _bytes = ring->tx_stats.bytes;
+- _packets = ring->tx_stats.packets;
+- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+- bytes += _bytes;
+- packets += _packets;
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++ if (!ring->vmdq_netdev) {
++ bytes += ring->tx_stats.bytes;
++ packets += ring->tx_stats.packets;
++ }
++#else
++ bytes += ring->tx_stats.bytes;
++ packets += ring->tx_stats.packets;
++#endif
+ }
+ net_stats->tx_bytes = bytes;
+ net_stats->tx_packets = packets;
+- rcu_read_unlock();
+
+ /* read stats registers */
+- adapter->stats.crcerrs += rd32(E1000_CRCERRS);
+- adapter->stats.gprc += rd32(E1000_GPRC);
+- adapter->stats.gorc += rd32(E1000_GORCL);
+- rd32(E1000_GORCH); /* clear GORCL */
+- adapter->stats.bprc += rd32(E1000_BPRC);
+- adapter->stats.mprc += rd32(E1000_MPRC);
+- adapter->stats.roc += rd32(E1000_ROC);
+-
+- adapter->stats.prc64 += rd32(E1000_PRC64);
+- adapter->stats.prc127 += rd32(E1000_PRC127);
+- adapter->stats.prc255 += rd32(E1000_PRC255);
+- adapter->stats.prc511 += rd32(E1000_PRC511);
+- adapter->stats.prc1023 += rd32(E1000_PRC1023);
+- adapter->stats.prc1522 += rd32(E1000_PRC1522);
+- adapter->stats.symerrs += rd32(E1000_SYMERRS);
+- adapter->stats.sec += rd32(E1000_SEC);
++ adapter->stats.crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
++ adapter->stats.gprc += E1000_READ_REG(hw, E1000_GPRC);
++ adapter->stats.gorc += E1000_READ_REG(hw, E1000_GORCL);
++ E1000_READ_REG(hw, E1000_GORCH); /* clear GORCL */
++ adapter->stats.bprc += E1000_READ_REG(hw, E1000_BPRC);
++ adapter->stats.mprc += E1000_READ_REG(hw, E1000_MPRC);
++ adapter->stats.roc += E1000_READ_REG(hw, E1000_ROC);
++
++ adapter->stats.prc64 += E1000_READ_REG(hw, E1000_PRC64);
++ adapter->stats.prc127 += E1000_READ_REG(hw, E1000_PRC127);
++ adapter->stats.prc255 += E1000_READ_REG(hw, E1000_PRC255);
++ adapter->stats.prc511 += E1000_READ_REG(hw, E1000_PRC511);
++ adapter->stats.prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
++ adapter->stats.prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
++ adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
++ adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC);
+
+- mpc = rd32(E1000_MPC);
++ mpc = E1000_READ_REG(hw, E1000_MPC);
+ adapter->stats.mpc += mpc;
+ net_stats->rx_fifo_errors += mpc;
+- adapter->stats.scc += rd32(E1000_SCC);
+- adapter->stats.ecol += rd32(E1000_ECOL);
+- adapter->stats.mcc += rd32(E1000_MCC);
+- adapter->stats.latecol += rd32(E1000_LATECOL);
+- adapter->stats.dc += rd32(E1000_DC);
+- adapter->stats.rlec += rd32(E1000_RLEC);
+- adapter->stats.xonrxc += rd32(E1000_XONRXC);
+- adapter->stats.xontxc += rd32(E1000_XONTXC);
+- adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
+- adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
+- adapter->stats.fcruc += rd32(E1000_FCRUC);
+- adapter->stats.gptc += rd32(E1000_GPTC);
+- adapter->stats.gotc += rd32(E1000_GOTCL);
+- rd32(E1000_GOTCH); /* clear GOTCL */
+- adapter->stats.rnbc += rd32(E1000_RNBC);
+- adapter->stats.ruc += rd32(E1000_RUC);
+- adapter->stats.rfc += rd32(E1000_RFC);
+- adapter->stats.rjc += rd32(E1000_RJC);
+- adapter->stats.tor += rd32(E1000_TORH);
+- adapter->stats.tot += rd32(E1000_TOTH);
+- adapter->stats.tpr += rd32(E1000_TPR);
+-
+- adapter->stats.ptc64 += rd32(E1000_PTC64);
+- adapter->stats.ptc127 += rd32(E1000_PTC127);
+- adapter->stats.ptc255 += rd32(E1000_PTC255);
+- adapter->stats.ptc511 += rd32(E1000_PTC511);
+- adapter->stats.ptc1023 += rd32(E1000_PTC1023);
+- adapter->stats.ptc1522 += rd32(E1000_PTC1522);
+-
+- adapter->stats.mptc += rd32(E1000_MPTC);
+- adapter->stats.bptc += rd32(E1000_BPTC);
+-
+- adapter->stats.tpt += rd32(E1000_TPT);
+- adapter->stats.colc += rd32(E1000_COLC);
+-
+- adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
+- /* read internal phy specific stats */
+- reg = rd32(E1000_CTRL_EXT);
++ adapter->stats.scc += E1000_READ_REG(hw, E1000_SCC);
++ adapter->stats.ecol += E1000_READ_REG(hw, E1000_ECOL);
++ adapter->stats.mcc += E1000_READ_REG(hw, E1000_MCC);
++ adapter->stats.latecol += E1000_READ_REG(hw, E1000_LATECOL);
++ adapter->stats.dc += E1000_READ_REG(hw, E1000_DC);
++ adapter->stats.rlec += E1000_READ_REG(hw, E1000_RLEC);
++ adapter->stats.xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
++ adapter->stats.xontxc += E1000_READ_REG(hw, E1000_XONTXC);
++ adapter->stats.xoffrxc += E1000_READ_REG(hw, E1000_XOFFRXC);
++ adapter->stats.xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
++ adapter->stats.fcruc += E1000_READ_REG(hw, E1000_FCRUC);
++ adapter->stats.gptc += E1000_READ_REG(hw, E1000_GPTC);
++ adapter->stats.gotc += E1000_READ_REG(hw, E1000_GOTCL);
++ E1000_READ_REG(hw, E1000_GOTCH); /* clear GOTCL */
++ adapter->stats.rnbc += E1000_READ_REG(hw, E1000_RNBC);
++ adapter->stats.ruc += E1000_READ_REG(hw, E1000_RUC);
++ adapter->stats.rfc += E1000_READ_REG(hw, E1000_RFC);
++ adapter->stats.rjc += E1000_READ_REG(hw, E1000_RJC);
++ adapter->stats.tor += E1000_READ_REG(hw, E1000_TORH);
++ adapter->stats.tot += E1000_READ_REG(hw, E1000_TOTH);
++ adapter->stats.tpr += E1000_READ_REG(hw, E1000_TPR);
++
++ adapter->stats.ptc64 += E1000_READ_REG(hw, E1000_PTC64);
++ adapter->stats.ptc127 += E1000_READ_REG(hw, E1000_PTC127);
++ adapter->stats.ptc255 += E1000_READ_REG(hw, E1000_PTC255);
++ adapter->stats.ptc511 += E1000_READ_REG(hw, E1000_PTC511);
++ adapter->stats.ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
++ adapter->stats.ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
++
++ adapter->stats.mptc += E1000_READ_REG(hw, E1000_MPTC);
++ adapter->stats.bptc += E1000_READ_REG(hw, E1000_BPTC);
++
++ adapter->stats.tpt += E1000_READ_REG(hw, E1000_TPT);
++ adapter->stats.colc += E1000_READ_REG(hw, E1000_COLC);
++
++ adapter->stats.algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
++ /* read internal phy sepecific stats */
++ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
+- adapter->stats.rxerrc += rd32(E1000_RXERRC);
++ adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
+
+ /* this stat has invalid values on i210/i211 */
+ if ((hw->mac.type != e1000_i210) &&
+ (hw->mac.type != e1000_i211))
+- adapter->stats.tncrs += rd32(E1000_TNCRS);
++ adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS);
+ }
++ adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC);
++ adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
+
+- adapter->stats.tsctc += rd32(E1000_TSCTC);
+- adapter->stats.tsctfc += rd32(E1000_TSCTFC);
+-
+- adapter->stats.iac += rd32(E1000_IAC);
+- adapter->stats.icrxoc += rd32(E1000_ICRXOC);
+- adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
+- adapter->stats.icrxatc += rd32(E1000_ICRXATC);
+- adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
+- adapter->stats.ictxatc += rd32(E1000_ICTXATC);
+- adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
+- adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
+- adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
++ adapter->stats.iac += E1000_READ_REG(hw, E1000_IAC);
++ adapter->stats.icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
++ adapter->stats.icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
++ adapter->stats.icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
++ adapter->stats.ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
++ adapter->stats.ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
++ adapter->stats.ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
++ adapter->stats.ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
++ adapter->stats.icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
+
+ /* Fill out the OS statistics structure */
+ net_stats->multicast = adapter->stats.mprc;
+@@ -5365,24 +6073,20 @@
+ /* Phy Stats */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ if ((adapter->link_speed == SPEED_1000) &&
+- (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
++ (!igb_e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
+ phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
+ adapter->phy_stats.idle_errors += phy_tmp;
+ }
+ }
+
+ /* Management Stats */
+- adapter->stats.mgptc += rd32(E1000_MGTPTC);
+- adapter->stats.mgprc += rd32(E1000_MGTPRC);
+- adapter->stats.mgpdc += rd32(E1000_MGTPDC);
+-
+- /* OS2BMC Stats */
+- reg = rd32(E1000_MANC);
+- if (reg & E1000_MANC_EN_BMC2OS) {
+- adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
+- adapter->stats.o2bspc += rd32(E1000_O2BSPC);
+- adapter->stats.b2ospc += rd32(E1000_B2OSPC);
+- adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
++ adapter->stats.mgptc += E1000_READ_REG(hw, E1000_MGTPTC);
++ adapter->stats.mgprc += E1000_READ_REG(hw, E1000_MGTPRC);
++ if (hw->mac.type > e1000_82580) {
++ adapter->stats.o2bgptc += E1000_READ_REG(hw, E1000_O2BGPTC);
++ adapter->stats.o2bspc += E1000_READ_REG(hw, E1000_O2BSPC);
++ adapter->stats.b2ospc += E1000_READ_REG(hw, E1000_B2OSPC);
++ adapter->stats.b2ogprc += E1000_READ_REG(hw, E1000_B2OGPRC);
+ }
+ }
+
+@@ -5390,7 +6094,7 @@
+ {
+ struct igb_adapter *adapter = data;
+ struct e1000_hw *hw = &adapter->hw;
+- u32 icr = rd32(E1000_ICR);
++ u32 icr = E1000_READ_REG(hw, E1000_ICR);
+ /* reading ICR causes bit 31 of EICR to be cleared */
+
+ if (icr & E1000_ICR_DRSTA)
+@@ -5417,18 +6121,24 @@
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
++#ifdef HAVE_PTP_1588_CLOCK
+ if (icr & E1000_ICR_TS) {
+- u32 tsicr = rd32(E1000_TSICR);
++ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+- wr32(E1000_TSICR, E1000_TSICR_TXTS);
++ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
++#endif /* HAVE_PTP_1588_CLOCK */
+
+- wr32(E1000_EIMS, adapter->eims_other);
++ /* Check for MDD event */
++ if (icr & E1000_ICR_MDDET)
++ igb_process_mdd_event(adapter);
++
++ E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_other);
+
+ return IRQ_HANDLED;
+ }
+@@ -5465,7 +6175,7 @@
+ return IRQ_HANDLED;
+ }
+
+-#ifdef CONFIG_IGB_DCA
++#ifdef IGB_DCA
+ static void igb_update_tx_dca(struct igb_adapter *adapter,
+ struct igb_ring *tx_ring,
+ int cpu)
+@@ -5474,9 +6184,10 @@
+ u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
+
+ if (hw->mac.type != e1000_82575)
+- txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
++ txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT_82576;
+
+- /* We can enable relaxed ordering for reads, but not writes when
++ /*
++ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+@@ -5484,7 +6195,7 @@
+ E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_DESC_DCA_EN;
+
+- wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
++ E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
+ }
+
+ static void igb_update_rx_dca(struct igb_adapter *adapter,
+@@ -5495,16 +6206,17 @@
+ u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
+
+ if (hw->mac.type != e1000_82575)
+- rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
++ rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT_82576;
+
+- /* We can enable relaxed ordering for reads, but not writes when
++ /*
++ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
+ E1000_DCA_RXCTRL_DESC_DCA_EN;
+
+- wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
++ E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
+ }
+
+ static void igb_update_dca(struct igb_q_vector *q_vector)
+@@ -5535,7 +6247,7 @@
+ return;
+
+ /* Always use CB2 mode, difference is masked in the CB driver. */
+- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
++ E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ adapter->q_vector[i]->cpu = -1;
+@@ -5556,9 +6268,9 @@
+ /* if already enabled, don't do it again */
+ if (adapter->flags & IGB_FLAG_DCA_ENABLED)
+ break;
+- if (dca_add_requester(dev) == 0) {
++ if (dca_add_requester(dev) == E1000_SUCCESS) {
+ adapter->flags |= IGB_FLAG_DCA_ENABLED;
+- dev_info(&pdev->dev, "DCA enabled\n");
++ dev_info(pci_dev_to_dev(pdev), "DCA enabled\n");
+ igb_setup_dca(adapter);
+ break;
+ }
+@@ -5569,14 +6281,15 @@
+ * hanging around in the sysfs model
+ */
+ dca_remove_requester(dev);
+- dev_info(&pdev->dev, "DCA disabled\n");
++ dev_info(pci_dev_to_dev(pdev), "DCA disabled\n");
+ adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
+- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
++ E1000_WRITE_REG(hw, E1000_DCA_CTRL,
++ E1000_DCA_CTRL_DCA_DISABLE);
+ }
+ break;
+ }
+
+- return 0;
++ return E1000_SUCCESS;
+ }
+
+ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
+@@ -5585,27 +6298,29 @@
+ int ret_val;
+
+ ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
+- __igb_notify_dca);
++ __igb_notify_dca);
+
+ return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
+ }
+-#endif /* CONFIG_IGB_DCA */
++#endif /* IGB_DCA */
+
+-#ifdef CONFIG_PCI_IOV
+ static int igb_vf_configure(struct igb_adapter *adapter, int vf)
+ {
+ unsigned char mac_addr[ETH_ALEN];
+
+- eth_zero_addr(mac_addr);
++ random_ether_addr(mac_addr);
+ igb_set_vf_mac(adapter, vf, mac_addr);
+
++#ifdef IFLA_VF_MAX
++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ /* By default spoof check is enabled for all VFs */
+ adapter->vf_data[vf].spoofchk_enabled = true;
++#endif
++#endif
+
+- return 0;
++ return true;
+ }
+
+-#endif
+ static void igb_ping_all_vfs(struct igb_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+@@ -5616,26 +6331,71 @@
+ ping = E1000_PF_CONTROL_MSG;
+ if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
+ ping |= E1000_VT_MSGTYPE_CTS;
+- igb_write_mbx(hw, &ping, 1, i);
++ e1000_write_mbx(hw, &ping, 1, i);
+ }
+ }
+
++/**
++ * igb_mta_set_ - Set multicast filter table address
++ * @adapter: pointer to the adapter structure
++ * @hash_value: determines the MTA register and bit to set
++ *
++ * The multicast table address is a register array of 32-bit registers.
++ * The hash_value is used to determine what register the bit is in, the
++ * current value is read, the new bit is OR'd in and the new value is
++ * written back into the register.
++ **/
++void igb_mta_set(struct igb_adapter *adapter, u32 hash_value)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 hash_bit, hash_reg, mta;
++
++ /*
++ * The MTA is a register array of 32-bit registers. It is
++ * treated like an array of (32*mta_reg_count) bits. We want to
++ * set bit BitArray[hash_value]. So we figure out what register
++ * the bit is in, read it, OR in the new bit, then write
++ * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
++ * mask to bits 31:5 of the hash value which gives us the
++ * register we're modifying. The hash bit within that register
++ * is determined by the lower 5 bits of the hash value.
++ */
++ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
++ hash_bit = hash_value & 0x1F;
++
++ mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
++
++ mta |= (1 << hash_bit);
++
++ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
++ E1000_WRITE_FLUSH(hw);
++}
++
+ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+ {
++
+ struct e1000_hw *hw = &adapter->hw;
+- u32 vmolr = rd32(E1000_VMOLR(vf));
++ u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+
+ vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
+ IGB_VF_FLAG_MULTI_PROMISC);
+ vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
++#ifdef IGB_ENABLE_VF_PROMISC
++ if (*msgbuf & E1000_VF_SET_PROMISC_UNICAST) {
++ vmolr |= E1000_VMOLR_ROPE;
++ vf_data->flags |= IGB_VF_FLAG_UNI_PROMISC;
++ *msgbuf &= ~E1000_VF_SET_PROMISC_UNICAST;
++ }
++#endif
+ if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
+ vmolr |= E1000_VMOLR_MPME;
+ vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
+ *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
+ } else {
+- /* if we have hashes and we are clearing a multicast promisc
++ /*
++ * if we have hashes and we are clearing a multicast promisc
+ * flag we need to write the hashes to the MTA as this step
+ * was previously skipped
+ */
+@@ -5646,17 +6406,18 @@
+
+ vmolr |= E1000_VMOLR_ROMPE;
+ for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+- igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
++ igb_mta_set(adapter, vf_data->vf_mc_hashes[j]);
+ }
+ }
+
+- wr32(E1000_VMOLR(vf), vmolr);
++ E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr);
+
+ /* there are flags left unprocessed, likely not supported */
+ if (*msgbuf & E1000_VT_MSGINFO_MASK)
+ return -EINVAL;
+
+ return 0;
++
+ }
+
+ static int igb_set_vf_multicasts(struct igb_adapter *adapter,
+@@ -5694,7 +6455,7 @@
+ int i, j;
+
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
+- u32 vmolr = rd32(E1000_VMOLR(i));
++ u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
+
+ vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
+@@ -5706,9 +6467,9 @@
+ } else if (vf_data->num_vf_mc_hashes) {
+ vmolr |= E1000_VMOLR_ROMPE;
+ for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+- igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
++ igb_mta_set(adapter, vf_data->vf_mc_hashes[j]);
+ }
+- wr32(E1000_VMOLR(i), vmolr);
++ E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
+ }
+ }
+
+@@ -5716,13 +6477,14 @@
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 pool_mask, reg, vid;
++ u16 vlan_default;
+ int i;
+
+ pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+
+ /* Find the vlan filter for this id */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+- reg = rd32(E1000_VLVF(i));
++ reg = E1000_READ_REG(hw, E1000_VLVF(i));
+
+ /* remove the vf from the pool */
+ reg &= ~pool_mask;
+@@ -5732,16 +6494,20 @@
+ (reg & E1000_VLVF_VLANID_ENABLE)) {
+ reg = 0;
+ vid = reg & E1000_VLVF_VLANID_MASK;
+- igb_vfta_set(hw, vid, false);
++ igb_vfta_set(adapter, vid, FALSE);
+ }
+
+- wr32(E1000_VLVF(i), reg);
++ E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
+ }
+
+ adapter->vf_data[vf].vlans_enabled = 0;
++
++ vlan_default = adapter->vf_data[vf].default_vf_vlan_id;
++ if (vlan_default)
++ igb_vlvf_set(adapter, vlan_default, true, vf);
+ }
+
+-static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
++s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg, i;
+@@ -5751,12 +6517,12 @@
+ return -1;
+
+ /* we only need to do this if VMDq is enabled */
+- if (!adapter->vfs_allocated_count)
++ if (!adapter->vmdq_pools)
+ return -1;
+
+ /* Find the vlan filter for this id */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+- reg = rd32(E1000_VLVF(i));
++ reg = E1000_READ_REG(hw, E1000_VLVF(i));
+ if ((reg & E1000_VLVF_VLANID_ENABLE) &&
+ vid == (reg & E1000_VLVF_VLANID_MASK))
+ break;
+@@ -5769,7 +6535,7 @@
+ * one without the enable bit set
+ */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+- reg = rd32(E1000_VLVF(i));
++ reg = E1000_READ_REG(hw, E1000_VLVF(i));
+ if (!(reg & E1000_VLVF_VLANID_ENABLE))
+ break;
+ }
+@@ -5781,26 +6547,26 @@
+ /* if !enabled we need to set this up in vfta */
+ if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
+ /* add VID to filter table */
+- igb_vfta_set(hw, vid, true);
++ igb_vfta_set(adapter, vid, TRUE);
+ reg |= E1000_VLVF_VLANID_ENABLE;
+ }
+ reg &= ~E1000_VLVF_VLANID_MASK;
+ reg |= vid;
+- wr32(E1000_VLVF(i), reg);
++ E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
+
+ /* do not modify RLPML for PF devices */
+ if (vf >= adapter->vfs_allocated_count)
+- return 0;
++ return E1000_SUCCESS;
+
+ if (!adapter->vf_data[vf].vlans_enabled) {
+ u32 size;
+
+- reg = rd32(E1000_VMOLR(vf));
++ reg = E1000_READ_REG(hw, E1000_VMOLR(vf));
+ size = reg & E1000_VMOLR_RLPML_MASK;
+ size += 4;
+ reg &= ~E1000_VMOLR_RLPML_MASK;
+ reg |= size;
+- wr32(E1000_VMOLR(vf), reg);
++ E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg);
+ }
+
+ adapter->vf_data[vf].vlans_enabled++;
+@@ -5812,38 +6578,40 @@
+ /* if pool is empty then remove entry from vfta */
+ if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
+ reg = 0;
+- igb_vfta_set(hw, vid, false);
++ igb_vfta_set(adapter, vid, FALSE);
+ }
+- wr32(E1000_VLVF(i), reg);
++ E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
+
+ /* do not modify RLPML for PF devices */
+ if (vf >= adapter->vfs_allocated_count)
+- return 0;
++ return E1000_SUCCESS;
+
+ adapter->vf_data[vf].vlans_enabled--;
+ if (!adapter->vf_data[vf].vlans_enabled) {
+ u32 size;
+
+- reg = rd32(E1000_VMOLR(vf));
++ reg = E1000_READ_REG(hw, E1000_VMOLR(vf));
+ size = reg & E1000_VMOLR_RLPML_MASK;
+ size -= 4;
+ reg &= ~E1000_VMOLR_RLPML_MASK;
+ reg |= size;
+- wr32(E1000_VMOLR(vf), reg);
++ E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg);
+ }
+ }
+ }
+- return 0;
++ return E1000_SUCCESS;
+ }
+
++#ifdef IFLA_VF_MAX
+ static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (vid)
+- wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
++ E1000_WRITE_REG(hw, E1000_VMVIR(vf),
++ (vid | E1000_VMVIR_VLANA_DEFAULT));
+ else
+- wr32(E1000_VMVIR(vf), 0);
++ E1000_WRITE_REG(hw, E1000_VMVIR(vf), 0);
+ }
+
+ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+@@ -5852,7 +6620,9 @@
+ int err = 0;
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+- if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
++ /* VLAN IDs accepted range 0-4094 */
++ if ((vf >= adapter->vfs_allocated_count) || (vlan > VLAN_VID_MASK-1)
++ || (qos > 7))
+ return -EINVAL;
+ if (vlan || qos) {
+ err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
+@@ -5862,6 +6632,7 @@
+ igb_set_vmolr(adapter, vf, !vlan);
+ adapter->vf_data[vf].pf_vlan = vlan;
+ adapter->vf_data[vf].pf_qos = qos;
++ igb_set_vf_vlan_strip(adapter, vf, true);
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+@@ -5871,10 +6642,14 @@
+ "Bring the PF device up before attempting to use the VF device.\n");
+ }
+ } else {
++ if (adapter->vf_data[vf].pf_vlan)
++ dev_info(&adapter->pdev->dev,
++ "Clearing VLAN on VF %d\n", vf);
+ igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
+- false, vf);
++ false, vf);
+ igb_set_vmvir(adapter, vlan, vf);
+ igb_set_vmolr(adapter, vf, true);
++ igb_set_vf_vlan_strip(adapter, vf, false);
+ adapter->vf_data[vf].pf_vlan = 0;
+ adapter->vf_data[vf].pf_qos = 0;
+ }
+@@ -5882,6 +6657,36 @@
+ return err;
+ }
+
++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
++static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
++ bool setting)
++{
++ struct igb_adapter *adapter = netdev_priv(netdev);
++ struct e1000_hw *hw = &adapter->hw;
++ u32 dtxswc, reg_offset;
++
++ if (!adapter->vfs_allocated_count)
++ return -EOPNOTSUPP;
++
++ if (vf >= adapter->vfs_allocated_count)
++ return -EINVAL;
++
++ reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
++ dtxswc = E1000_READ_REG(hw, reg_offset);
++ if (setting)
++ dtxswc |= ((1 << vf) |
++ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
++ else
++ dtxswc &= ~((1 << vf) |
++ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
++ E1000_WRITE_REG(hw, reg_offset, dtxswc);
++
++ adapter->vf_data[vf].spoofchk_enabled = setting;
++ return E1000_SUCCESS;
++}
++#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
++#endif /* IFLA_VF_MAX */
++
+ static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+@@ -5890,7 +6695,7 @@
+
+ /* Find the vlan filter for this id */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+- reg = rd32(E1000_VLVF(i));
++ reg = E1000_READ_REG(hw, E1000_VLVF(i));
+ if ((reg & E1000_VLVF_VLANID_ENABLE) &&
+ vid == (reg & E1000_VLVF_VLANID_MASK))
+ break;
+@@ -5909,6 +6714,11 @@
+ int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
+ int err = 0;
+
++ if (vid)
++ igb_set_vf_vlan_strip(adapter, vf, true);
++ else
++ igb_set_vf_vlan_strip(adapter, vf, false);
++
+ /* If in promiscuous mode we need to make sure the PF also has
+ * the VLAN filter set.
+ */
+@@ -5928,6 +6738,7 @@
+ */
+ if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
+ u32 vlvf, bits;
++
+ int regndx = igb_find_vlvf_entry(adapter, vid);
+
+ if (regndx < 0)
+@@ -5935,7 +6746,7 @@
+ /* See if any other pools are set for this VLAN filter
+ * entry other than the PF.
+ */
+- vlvf = bits = rd32(E1000_VLVF(regndx));
++ vlvf = bits = E1000_READ_REG(hw, E1000_VLVF(regndx));
+ bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT +
+ adapter->vfs_allocated_count);
+ /* If the filter was removed then ensure PF pool bit
+@@ -5943,7 +6754,9 @@
+ * because the PF is in promiscuous mode.
+ */
+ if ((vlvf & VLAN_VID_MASK) == vid &&
++#ifndef HAVE_VLAN_RX_REGISTER
+ !test_bit(vid, adapter->active_vlans) &&
++#endif
+ !bits)
+ igb_vlvf_set(adapter, vid, add,
+ adapter->vfs_allocated_count);
+@@ -5955,7 +6768,9 @@
+
+ static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
+ {
+- /* clear flags - except flag that indicates PF has set the MAC */
++ struct e1000_hw *hw = &adapter->hw;
++
++ /* clear flags except flag that the PF has set the MAC */
+ adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
+ adapter->vf_data[vf].last_nack = jiffies;
+
+@@ -5964,27 +6779,40 @@
+
+ /* reset vlans for device */
+ igb_clear_vf_vfta(adapter, vf);
++#ifdef IFLA_VF_MAX
+ if (adapter->vf_data[vf].pf_vlan)
+ igb_ndo_set_vf_vlan(adapter->netdev, vf,
+ adapter->vf_data[vf].pf_vlan,
+ adapter->vf_data[vf].pf_qos);
+ else
+ igb_clear_vf_vfta(adapter, vf);
++#endif
+
+ /* reset multicast table array for vf */
+ adapter->vf_data[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ igb_set_rx_mode(adapter->netdev);
++
++ /*
++ * Reset the VFs TDWBAL and TDWBAH registers which are not
++ * cleared by a VFLR
++ */
++ E1000_WRITE_REG(hw, E1000_TDWBAH(vf), 0);
++ E1000_WRITE_REG(hw, E1000_TDWBAL(vf), 0);
++ if (hw->mac.type == e1000_82576) {
++ E1000_WRITE_REG(hw, E1000_TDWBAH(IGB_MAX_VF_FUNCTIONS + vf), 0);
++ E1000_WRITE_REG(hw, E1000_TDWBAL(IGB_MAX_VF_FUNCTIONS + vf), 0);
++ }
+ }
+
+ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
+ {
+ unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+
+- /* clear mac address as we were hotplug removed/added */
++ /* generate a new mac address as we were hotplug removed/added */
+ if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
+- eth_zero_addr(vf_mac);
++ random_ether_addr(vf_mac);
+
+ /* process remaining reset events */
+ igb_vf_reset(adapter, vf);
+@@ -6005,25 +6833,26 @@
+ igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
+
+ /* enable transmit and receive for vf */
+- reg = rd32(E1000_VFTE);
+- wr32(E1000_VFTE, reg | (1 << vf));
+- reg = rd32(E1000_VFRE);
+- wr32(E1000_VFRE, reg | (1 << vf));
++ reg = E1000_READ_REG(hw, E1000_VFTE);
++ E1000_WRITE_REG(hw, E1000_VFTE, reg | (1 << vf));
++ reg = E1000_READ_REG(hw, E1000_VFRE);
++ E1000_WRITE_REG(hw, E1000_VFRE, reg | (1 << vf));
+
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
+- memcpy(addr, vf_mac, ETH_ALEN);
+- igb_write_mbx(hw, msgbuf, 3, vf);
++ memcpy(addr, vf_mac, 6);
++ e1000_write_mbx(hw, msgbuf, 3, vf);
+ }
+
+ static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
+ {
+- /* The VF MAC Address is stored in a packed array of bytes
++ /*
++ * The VF MAC Address is stored in a packed array of bytes
+ * starting at the second 32 bit word of the msg array
+ */
+- unsigned char *addr = (char *)&msg[1];
++ unsigned char *addr = (unsigned char *)&msg[1];
+ int err = -1;
+
+ if (is_valid_ether_addr(addr))
+@@ -6041,7 +6870,7 @@
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
+ time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
+- igb_write_mbx(hw, &msg, 1, vf);
++ e1000_write_mbx(hw, &msg, 1, vf);
+ vf_data->last_nack = jiffies;
+ }
+ }
+@@ -6054,45 +6883,47 @@
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+ s32 retval;
+
+- retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
++ retval = e1000_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
+
+ if (retval) {
+- /* if receive failed revoke VF CTS stats and restart init */
+- dev_err(&pdev->dev, "Error receiving message from VF\n");
+- vf_data->flags &= ~IGB_VF_FLAG_CTS;
+- if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
+- return;
+- goto out;
++ dev_err(pci_dev_to_dev(pdev), "Error receiving message from VF\n");
++ return;
+ }
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
+ return;
+
+- /* until the vf completes a reset it should not be
++ /*
++ * until the vf completes a reset it should not be
+ * allowed to start any configuration.
+ */
++
+ if (msgbuf[0] == E1000_VF_RESET) {
+ igb_vf_reset_msg(adapter, vf);
+ return;
+ }
+
+ if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
+- if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
+- return;
+- retval = -1;
+- goto out;
++ msgbuf[0] = E1000_VT_MSGTYPE_NACK;
++ if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
++ e1000_write_mbx(hw, msgbuf, 1, vf);
++ vf_data->last_nack = jiffies;
++ }
++ return;
+ }
+
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case E1000_VF_SET_MAC_ADDR:
+ retval = -EINVAL;
++#ifndef IGB_DISABLE_VF_MAC_SET
+ if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
+ retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
+ else
+- dev_warn(&pdev->dev,
+- "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
+- vf);
++ DPRINTK(DRV, INFO,
++ "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
++ vf);
++#endif
+ break;
+ case E1000_VF_SET_PROMISC:
+ retval = igb_set_vf_promisc(adapter, msgbuf, vf);
+@@ -6105,28 +6936,31 @@
+ break;
+ case E1000_VF_SET_VLAN:
+ retval = -1;
++#ifdef IFLA_VF_MAX
+ if (vf_data->pf_vlan)
+- dev_warn(&pdev->dev,
+- "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
+- vf);
++ DPRINTK(DRV, INFO,
++ "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
++ vf);
+ else
++#endif
+ retval = igb_set_vf_vlan(adapter, msgbuf, vf);
+ break;
+ default:
+- dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
+- retval = -1;
++ dev_err(pci_dev_to_dev(pdev), "Unhandled Msg %08x\n",
++ msgbuf[0]);
++ retval = -E1000_ERR_MBX;
+ break;
+ }
+
+- msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
+-out:
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
+
+- igb_write_mbx(hw, msgbuf, 1, vf);
++ msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
++
++ e1000_write_mbx(hw, msgbuf, 1, vf);
+ }
+
+ static void igb_msg_task(struct igb_adapter *adapter)
+@@ -6136,15 +6970,15 @@
+
+ for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
+ /* process any reset requests */
+- if (!igb_check_for_rst(hw, vf))
++ if (!e1000_check_for_rst(hw, vf))
+ igb_vf_reset_event(adapter, vf);
+
+ /* process any messages pending */
+- if (!igb_check_for_msg(hw, vf))
++ if (!e1000_check_for_msg(hw, vf))
+ igb_rcv_msg_from_vf(adapter, vf);
+
+ /* process any acks */
+- if (!igb_check_for_ack(hw, vf))
++ if (!e1000_check_for_ack(hw, vf))
+ igb_rcv_ack_from_vf(adapter, vf);
+ }
+ }
+@@ -6169,17 +7003,17 @@
+ return;
+
+ /* we only need to do this if VMDq is enabled */
+- if (!adapter->vfs_allocated_count)
++ if (!adapter->vmdq_pools)
+ return;
+
+ for (i = 0; i < hw->mac.uta_reg_count; i++)
+- array_wr32(E1000_UTA, i, ~0);
++ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, ~0);
+ }
+
+ /**
+- * igb_intr_msi - Interrupt Handler
+- * @irq: interrupt number
+- * @data: pointer to a network interface device structure
++ * igb_intr_msi - Interrupt Handler
++ * @irq: interrupt number
++ * @data: pointer to a network interface device structure
+ **/
+ static irqreturn_t igb_intr_msi(int irq, void *data)
+ {
+@@ -6187,7 +7021,7 @@
+ struct igb_q_vector *q_vector = adapter->q_vector[0];
+ struct e1000_hw *hw = &adapter->hw;
+ /* read ICR disables interrupts using IAM */
+- u32 icr = rd32(E1000_ICR);
++ u32 icr = E1000_READ_REG(hw, E1000_ICR);
+
+ igb_write_itr(q_vector);
+
+@@ -6205,16 +7039,18 @@
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
++#ifdef HAVE_PTP_1588_CLOCK
+ if (icr & E1000_ICR_TS) {
+- u32 tsicr = rd32(E1000_TSICR);
++ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+- wr32(E1000_TSICR, E1000_TSICR_TXTS);
++ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
++#endif /* HAVE_PTP_1588_CLOCK */
+
+ napi_schedule(&q_vector->napi);
+
+@@ -6222,9 +7058,9 @@
+ }
+
+ /**
+- * igb_intr - Legacy Interrupt Handler
+- * @irq: interrupt number
+- * @data: pointer to a network interface device structure
++ * igb_intr - Legacy Interrupt Handler
++ * @irq: interrupt number
++ * @data: pointer to a network interface device structure
+ **/
+ static irqreturn_t igb_intr(int irq, void *data)
+ {
+@@ -6234,7 +7070,7 @@
+ /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
+ * need for the IMC write
+ */
+- u32 icr = rd32(E1000_ICR);
++ u32 icr = E1000_READ_REG(hw, E1000_ICR);
+
+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ * not set, then the adapter didn't send an interrupt
+@@ -6259,23 +7095,25 @@
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
++#ifdef HAVE_PTP_1588_CLOCK
+ if (icr & E1000_ICR_TS) {
+- u32 tsicr = rd32(E1000_TSICR);
++ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+- wr32(E1000_TSICR, E1000_TSICR_TXTS);
++ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
++#endif /* HAVE_PTP_1588_CLOCK */
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+ }
+
+-static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
++void igb_ring_irq_enable(struct igb_q_vector *q_vector)
+ {
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+@@ -6289,26 +7127,25 @@
+ }
+
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+- if (adapter->flags & IGB_FLAG_HAS_MSIX)
+- wr32(E1000_EIMS, q_vector->eims_value);
++ if (adapter->msix_entries)
++ E1000_WRITE_REG(hw, E1000_EIMS, q_vector->eims_value);
+ else
+ igb_irq_enable(adapter);
+ }
+ }
+
+ /**
+- * igb_poll - NAPI Rx polling callback
+- * @napi: napi polling structure
+- * @budget: count of how many packets we should handle
++ * igb_poll - NAPI Rx polling callback
++ * @napi: napi polling structure
++ * @budget: count of how many packets we should handle
+ **/
+ static int igb_poll(struct napi_struct *napi, int budget)
+ {
+ struct igb_q_vector *q_vector = container_of(napi,
+- struct igb_q_vector,
+- napi);
++ struct igb_q_vector, napi);
+ bool clean_complete = true;
+
+-#ifdef CONFIG_IGB_DCA
++#ifdef IGB_DCA
+ if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
+ igb_update_dca(q_vector);
+ #endif
+@@ -6318,6 +7155,12 @@
+ if (q_vector->rx.ring)
+ clean_complete &= igb_clean_rx_irq(q_vector, budget);
+
++#ifndef HAVE_NETDEV_NAPI_LIST
++ /* if netdev is disabled we need to stop polling */
++ if (!netif_running(q_vector->adapter->netdev))
++ clean_complete = true;
++
++#endif
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+@@ -6330,10 +7173,9 @@
+ }
+
+ /**
+- * igb_clean_tx_irq - Reclaim resources after transmit completes
+- * @q_vector: pointer to q_vector containing needed info
+- *
+- * returns true if ring is completely cleaned
++ * igb_clean_tx_irq - Reclaim resources after transmit completes
++ * @q_vector: pointer to q_vector containing needed info
++ * returns TRUE if ring is completely cleaned
+ **/
+ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
+ {
+@@ -6426,16 +7268,20 @@
+
+ netdev_tx_completed_queue(txring_txq(tx_ring),
+ total_packets, total_bytes);
++
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+- u64_stats_update_begin(&tx_ring->tx_syncp);
+ tx_ring->tx_stats.bytes += total_bytes;
+ tx_ring->tx_stats.packets += total_packets;
+- u64_stats_update_end(&tx_ring->tx_syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
+
++#ifdef DEBUG
++ if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags) &&
++ !(adapter->disable_hw_reset && adapter->tx_hang_detected)) {
++#else
+ if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
++#endif
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Detect a transmit hang in hardware, this serializes the
+@@ -6444,10 +7290,23 @@
+ clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+ if (tx_buffer->next_to_watch &&
+ time_after(jiffies, tx_buffer->time_stamp +
+- (adapter->tx_timeout_factor * HZ)) &&
+- !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
++ (adapter->tx_timeout_factor * HZ))
++ && !(E1000_READ_REG(hw, E1000_STATUS) &
++ E1000_STATUS_TXOFF)) {
+
+ /* detected Tx unit hang */
++#ifdef DEBUG
++ adapter->tx_hang_detected = TRUE;
++ if (adapter->disable_hw_reset) {
++ DPRINTK(DRV, WARNING,
++ "Deactivating netdev watchdog timer\n");
++ if (del_timer(&netdev_ring(tx_ring)->watchdog_timer))
++ dev_put(netdev_ring(tx_ring));
++#ifndef HAVE_NET_DEVICE_OPS
++ netdev_ring(tx_ring)->tx_timeout = NULL;
++#endif
++ }
++#endif /* DEBUG */
+ dev_err(tx_ring->dev,
+ "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+@@ -6461,7 +7320,7 @@
+ " jiffies <%lx>\n"
+ " desc.status <%x>\n",
+ tx_ring->queue_index,
+- rd32(E1000_TDH(tx_ring->reg_idx)),
++ E1000_READ_REG(hw, E1000_TDH(tx_ring->reg_idx)),
+ readl(tx_ring->tail),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+@@ -6469,8 +7328,11 @@
+ tx_buffer->next_to_watch,
+ jiffies,
+ tx_buffer->next_to_watch->wb.status);
+- netif_stop_subqueue(tx_ring->netdev,
+- tx_ring->queue_index);
++ if (netif_is_multiqueue(netdev_ring(tx_ring)))
++ netif_stop_subqueue(netdev_ring(tx_ring),
++ ring_queue_index(tx_ring));
++ else
++ netif_stop_queue(netdev_ring(tx_ring));
+
+ /* we are about to reset, no point in enabling stuff */
+ return true;
+@@ -6479,33 +7341,63 @@
+
+ #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets &&
+- netif_carrier_ok(tx_ring->netdev) &&
+- igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
++ netif_carrier_ok(netdev_ring(tx_ring)) &&
++ igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+- if (__netif_subqueue_stopped(tx_ring->netdev,
+- tx_ring->queue_index) &&
+- !(test_bit(__IGB_DOWN, &adapter->state))) {
+- netif_wake_subqueue(tx_ring->netdev,
+- tx_ring->queue_index);
+-
+- u64_stats_update_begin(&tx_ring->tx_syncp);
+- tx_ring->tx_stats.restart_queue++;
+- u64_stats_update_end(&tx_ring->tx_syncp);
++ if (netif_is_multiqueue(netdev_ring(tx_ring))) {
++ if (__netif_subqueue_stopped(netdev_ring(tx_ring),
++ ring_queue_index(tx_ring)) &&
++ !(test_bit(__IGB_DOWN, &adapter->state))) {
++ netif_wake_subqueue(netdev_ring(tx_ring),
++ ring_queue_index(tx_ring));
++ tx_ring->tx_stats.restart_queue++;
++ }
++ } else {
++ if (netif_queue_stopped(netdev_ring(tx_ring)) &&
++ !(test_bit(__IGB_DOWN, &adapter->state))) {
++ netif_wake_queue(netdev_ring(tx_ring));
++ tx_ring->tx_stats.restart_queue++;
++ }
+ }
+ }
+
+ return !!budget;
+ }
+
++#ifdef HAVE_VLAN_RX_REGISTER
++/**
++ * igb_receive_skb - helper function to handle rx indications
++ * @q_vector: structure containing interrupt and ring information
++ * @skb: packet to send up
++ **/
++static void igb_receive_skb(struct igb_q_vector *q_vector,
++ struct sk_buff *skb)
++{
++ struct vlan_group **vlgrp = netdev_priv(skb->dev);
++
++ if (IGB_CB(skb)->vid) {
++ if (*vlgrp) {
++ vlan_gro_receive(&q_vector->napi, *vlgrp,
++ IGB_CB(skb)->vid, skb);
++ } else {
++ dev_kfree_skb_any(skb);
++ }
++ } else {
++ napi_gro_receive(&q_vector->napi, skb);
++ }
++}
++
++#endif /* HAVE_VLAN_RX_REGISTER */
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ /**
+- * igb_reuse_rx_page - page flip buffer and store it back on the ring
+- * @rx_ring: rx descriptor ring to store buffers on
+- * @old_buff: donor buffer to have page reused
++ * igb_reuse_rx_page - page flip buffer and store it back on the ring
++ * @rx_ring: rx descriptor ring to store buffers on
++ * @old_buff: donor buffer to have page reused
+ *
+- * Synchronizes page for reuse by the adapter
++ * Synchronizes page for reuse by the adapter
+ **/
+ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *old_buff)
+@@ -6545,39 +7437,34 @@
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+
+- /* since we are the only owner of the page and we need to
+- * increment it, just set the value to 2 in order to avoid
+- * an unnecessary locked operation
+- */
+- atomic_set(&page->_count, 2);
+ #else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+ return false;
++#endif
+
+ /* bump ref count on page before it is given to the stack */
+ get_page(page);
+-#endif
+
+ return true;
+ }
+
+ /**
+- * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
+- * @rx_ring: rx descriptor ring to transact packets on
+- * @rx_buffer: buffer containing page to add
+- * @rx_desc: descriptor containing length of buffer written by hardware
+- * @skb: sk_buff to place the data into
+- *
+- * This function will add the data contained in rx_buffer->page to the skb.
+- * This is done either through a direct copy if the data in the buffer is
+- * less than the skb header size, otherwise it will just attach the page as
+- * a frag to the skb.
++ * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
++ * @rx_ring: rx descriptor ring to transact packets on
++ * @rx_buffer: buffer containing page to add
++ * @rx_desc: descriptor containing length of buffer written by hardware
++ * @skb: sk_buff to place the data into
++ *
++ * This function will add the data contained in rx_buffer->page to the skb.
++ * This is done either through a direct copy if the data in the buffer is
++ * less than the skb header size, otherwise it will just attach the page as
++ * a frag to the skb.
+ *
+- * The function will then update the page offset if necessary and return
+- * true if the buffer can be reused by the adapter.
++ * The function will then update the page offset if necessary and return
++ * true if the buffer can be reused by the adapter.
+ **/
+ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+@@ -6585,22 +7472,27 @@
+ struct sk_buff *skb)
+ {
+ struct page *page = rx_buffer->page;
++ unsigned char *va = page_address(page) + rx_buffer->page_offset;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+ #if (PAGE_SIZE < 8192)
+ unsigned int truesize = IGB_RX_BUFSZ;
+ #else
+- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
++ unsigned int truesize = SKB_DATA_ALIGN(size);
+ #endif
++ unsigned int pull_len;
+
+- if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
+- unsigned char *va = page_address(page) + rx_buffer->page_offset;
++ if (unlikely(skb_is_nonlinear(skb)))
++ goto add_tail_frag;
+
+- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+- va += IGB_TS_HDR_LEN;
+- size -= IGB_TS_HDR_LEN;
+- }
++#ifdef HAVE_PTP_1588_CLOCK
++ if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
++ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
++ va += IGB_TS_HDR_LEN;
++ size -= IGB_TS_HDR_LEN;
++ }
++#endif /* HAVE_PTP_1588_CLOCK */
+
++ if (likely(size <= IGB_RX_HDR_LEN)) {
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+ /* we can reuse buffer as-is, just make sure it is local */
+@@ -6612,8 +7504,21 @@
+ return false;
+ }
+
++ /* we need the header to contain the greater of either ETH_HLEN or
++ * 60 bytes if the skb->len is less than 60 for skb_pad.
++ */
++ pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
++
++ /* align pull length to size of long to optimize memcpy performance */
++ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
++
++ /* update all of the pointers */
++ va += pull_len;
++ size -= pull_len;
++
++add_tail_frag:
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+- rx_buffer->page_offset, size, truesize);
++ (unsigned long)va & ~PAGE_MASK, size, truesize);
+
+ return igb_can_reuse_rx_page(rx_buffer, page, truesize);
+ }
+@@ -6648,7 +7553,8 @@
+ return NULL;
+ }
+
+- /* we will be copying header into skb->data in
++ /*
++ * we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+@@ -6672,72 +7578,606 @@
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+- /* clear contents of rx_buffer */
+- rx_buffer->page = NULL;
++ /* clear contents of rx_buffer */
++ rx_buffer->page = NULL;
++
++ return skb;
++}
++
++#endif
++static inline void igb_rx_checksum(struct igb_ring *ring,
++ union e1000_adv_rx_desc *rx_desc,
++ struct sk_buff *skb)
++{
++ skb_checksum_none_assert(skb);
++
++ /* Ignore Checksum bit is set */
++ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
++ return;
++
++ /* Rx checksum disabled via ethtool */
++ if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM))
++ return;
++
++ /* TCP/UDP checksum error bit is set */
++ if (igb_test_staterr(rx_desc,
++ E1000_RXDEXT_STATERR_TCPE |
++ E1000_RXDEXT_STATERR_IPE)) {
++ /*
++ * work around errata with sctp packets where the TCPE aka
++ * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
++ * packets, (aka let the stack check the crc32c)
++ */
++ if (!((skb->len == 60) &&
++ test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags)))
++ ring->rx_stats.csum_err++;
++
++ /* let the stack verify checksum errors */
++ return;
++ }
++ /* It must be a TCP or UDP packet with a valid checksum */
++ if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
++ E1000_RXD_STAT_UDPCS))
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++}
++
++#ifdef NETIF_F_RXHASH
++static inline void igb_rx_hash(struct igb_ring *ring,
++ union e1000_adv_rx_desc *rx_desc,
++ struct sk_buff *skb)
++{
++ if (netdev_ring(ring)->features & NETIF_F_RXHASH)
++ skb_set_hash(skb,
++ le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
++ PKT_HASH_TYPE_L3);
++}
++
++#endif
++#ifndef IGB_NO_LRO
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++/**
++ * igb_merge_active_tail - merge active tail into lro skb
++ * @tail: pointer to active tail in frag_list
++ *
++ * This function merges the length and data of an active tail into the
++ * skb containing the frag_list. It resets the tail's pointer to the head,
++ * but it leaves the heads pointer to tail intact.
++ **/
++static inline struct sk_buff *igb_merge_active_tail(struct sk_buff *tail)
++{
++ struct sk_buff *head = IGB_CB(tail)->head;
++
++ if (!head)
++ return tail;
++
++ head->len += tail->len;
++ head->data_len += tail->len;
++ head->truesize += tail->len;
++
++ IGB_CB(tail)->head = NULL;
++
++ return head;
++}
++
++/**
++ * igb_add_active_tail - adds an active tail into the skb frag_list
++ * @head: pointer to the start of the skb
++ * @tail: pointer to active tail to add to frag_list
++ *
++ * This function adds an active tail to the end of the frag list. This tail
++ * will still be receiving data so we cannot yet ad it's stats to the main
++ * skb. That is done via igb_merge_active_tail.
++ **/
++static inline void igb_add_active_tail(struct sk_buff *head,
++ struct sk_buff *tail)
++{
++ struct sk_buff *old_tail = IGB_CB(head)->tail;
++
++ if (old_tail) {
++ igb_merge_active_tail(old_tail);
++ old_tail->next = tail;
++ } else {
++ skb_shinfo(head)->frag_list = tail;
++ }
++
++ IGB_CB(tail)->head = head;
++ IGB_CB(head)->tail = tail;
++
++ IGB_CB(head)->append_cnt++;
++}
++
++/**
++ * igb_close_active_frag_list - cleanup pointers on a frag_list skb
++ * @head: pointer to head of an active frag list
++ *
++ * This function will clear the frag_tail_tracker pointer on an active
++ * frag_list and returns true if the pointer was actually set
++ **/
++static inline bool igb_close_active_frag_list(struct sk_buff *head)
++{
++ struct sk_buff *tail = IGB_CB(head)->tail;
++
++ if (!tail)
++ return false;
++
++ igb_merge_active_tail(tail);
++
++ IGB_CB(head)->tail = NULL;
++
++ return true;
++}
++
++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
++/**
++ * igb_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled
++ * @adapter: board private structure
++ * @rx_desc: pointer to the rx descriptor
++ * @skb: pointer to the skb to be merged
++ *
++ **/
++static inline bool igb_can_lro(struct igb_ring *rx_ring,
++ union e1000_adv_rx_desc *rx_desc,
++ struct sk_buff *skb)
++{
++ struct iphdr *iph = (struct iphdr *)skb->data;
++ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
++
++ /* verify hardware indicates this is IPv4/TCP */
++ if ((!(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_TCP)) ||
++ !(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4))))
++ return false;
++
++ /* .. and LRO is enabled */
++ if (!(netdev_ring(rx_ring)->features & NETIF_F_LRO))
++ return false;
++
++ /* .. and we are not in promiscuous mode */
++ if (netdev_ring(rx_ring)->flags & IFF_PROMISC)
++ return false;
++
++ /* .. and the header is large enough for us to read IP/TCP fields */
++ if (!pskb_may_pull(skb, sizeof(struct igb_lrohdr)))
++ return false;
++
++ /* .. and there are no VLANs on packet */
++ if (skb->protocol != htons(ETH_P_IP))
++ return false;
++
++ /* .. and we are version 4 with no options */
++ if (*(u8 *)iph != 0x45)
++ return false;
++
++ /* .. and the packet is not fragmented */
++ if (iph->frag_off & htons(IP_MF | IP_OFFSET))
++ return false;
++
++ /* .. and that next header is TCP */
++ if (iph->protocol != IPPROTO_TCP)
++ return false;
++
++ return true;
++}
++
++static inline struct igb_lrohdr *igb_lro_hdr(struct sk_buff *skb)
++{
++ return (struct igb_lrohdr *)skb->data;
++}
++
++/**
++ * igb_lro_flush - Indicate packets to upper layer.
++ *
++ * Update IP and TCP header part of head skb if more than one
++ * skb's chained and indicate packets to upper layer.
++ **/
++static void igb_lro_flush(struct igb_q_vector *q_vector,
++ struct sk_buff *skb)
++{
++ struct igb_lro_list *lrolist = &q_vector->lrolist;
++
++ __skb_unlink(skb, &lrolist->active);
++
++ if (IGB_CB(skb)->append_cnt) {
++ struct igb_lrohdr *lroh = igb_lro_hdr(skb);
++
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ /* close any active lro contexts */
++ igb_close_active_frag_list(skb);
++
++#endif
++ /* incorporate ip header and re-calculate checksum */
++ lroh->iph.tot_len = ntohs(skb->len);
++ lroh->iph.check = 0;
++
++ /* header length is 5 since we know no options exist */
++ lroh->iph.check = ip_fast_csum((u8 *)lroh, 5);
++
++ /* clear TCP checksum to indicate we are an LRO frame */
++ lroh->th.check = 0;
++
++ /* incorporate latest timestamp into the tcp header */
++ if (IGB_CB(skb)->tsecr) {
++ lroh->ts[2] = IGB_CB(skb)->tsecr;
++ lroh->ts[1] = htonl(IGB_CB(skb)->tsval);
++ }
++#ifdef NETIF_F_GSO
++
++#ifdef NAPI_GRO_CB
++ NAPI_GRO_CB(skb)->data_offset = 0;
++#endif
++ skb_shinfo(skb)->gso_size = IGB_CB(skb)->mss;
++ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
++#endif
++ }
++
++#ifdef HAVE_VLAN_RX_REGISTER
++ igb_receive_skb(q_vector, skb);
++#else
++ napi_gro_receive(&q_vector->napi, skb);
++#endif
++ lrolist->stats.flushed++;
++}
++
++static void igb_lro_flush_all(struct igb_q_vector *q_vector)
++{
++ struct igb_lro_list *lrolist = &q_vector->lrolist;
++ struct sk_buff *skb, *tmp;
++
++ skb_queue_reverse_walk_safe(&lrolist->active, skb, tmp)
++ igb_lro_flush(q_vector, skb);
++}
++
++/*
++ * igb_lro_header_ok - Main LRO function.
++ **/
++static void igb_lro_header_ok(struct sk_buff *skb)
++{
++ struct igb_lrohdr *lroh = igb_lro_hdr(skb);
++ u16 opt_bytes, data_len;
++
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ IGB_CB(skb)->tail = NULL;
++#endif
++ IGB_CB(skb)->tsecr = 0;
++ IGB_CB(skb)->append_cnt = 0;
++ IGB_CB(skb)->mss = 0;
++
++ /* ensure that the checksum is valid */
++ if (skb->ip_summed != CHECKSUM_UNNECESSARY)
++ return;
++
++ /* If we see CE codepoint in IP header, packet is not mergeable */
++ if (INET_ECN_is_ce(ipv4_get_dsfield(&lroh->iph)))
++ return;
++
++ /* ensure no bits set besides ack or psh */
++ if (lroh->th.fin || lroh->th.syn || lroh->th.rst ||
++ lroh->th.urg || lroh->th.ece || lroh->th.cwr ||
++ !lroh->th.ack)
++ return;
++
++ /* store the total packet length */
++ data_len = ntohs(lroh->iph.tot_len);
++
++ /* remove any padding from the end of the skb */
++ __pskb_trim(skb, data_len);
++
++ /* remove header length from data length */
++ data_len -= sizeof(struct igb_lrohdr);
++
++ /*
++ * check for timestamps. Since the only option we handle are timestamps,
++ * we only have to handle the simple case of aligned timestamps
++ */
++ opt_bytes = (lroh->th.doff << 2) - sizeof(struct tcphdr);
++ if (opt_bytes != 0) {
++ if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) ||
++ !pskb_may_pull(skb, sizeof(struct igb_lrohdr) +
++ TCPOLEN_TSTAMP_ALIGNED) ||
++ (lroh->ts[0] != htonl((TCPOPT_NOP << 24) |
++ (TCPOPT_NOP << 16) |
++ (TCPOPT_TIMESTAMP << 8) |
++ TCPOLEN_TIMESTAMP)) ||
++ (lroh->ts[2] == 0)) {
++ return;
++ }
++
++ IGB_CB(skb)->tsval = ntohl(lroh->ts[1]);
++ IGB_CB(skb)->tsecr = lroh->ts[2];
++
++ data_len -= TCPOLEN_TSTAMP_ALIGNED;
++ }
++
++ /* record data_len as mss for the packet */
++ IGB_CB(skb)->mss = data_len;
++ IGB_CB(skb)->next_seq = ntohl(lroh->th.seq);
++}
++
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++static void igb_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb)
++{
++ struct skb_shared_info *sh_info;
++ struct skb_shared_info *new_skb_info;
++ unsigned int data_len;
++
++ sh_info = skb_shinfo(lro_skb);
++ new_skb_info = skb_shinfo(new_skb);
++
++ /* copy frags into the last skb */
++ memcpy(sh_info->frags + sh_info->nr_frags,
++ new_skb_info->frags,
++ new_skb_info->nr_frags * sizeof(skb_frag_t));
++
++ /* copy size data over */
++ sh_info->nr_frags += new_skb_info->nr_frags;
++ data_len = IGB_CB(new_skb)->mss;
++ lro_skb->len += data_len;
++ lro_skb->data_len += data_len;
++ lro_skb->truesize += data_len;
++
++ /* wipe record of data from new_skb */
++ new_skb_info->nr_frags = 0;
++ new_skb->len = new_skb->data_len = 0;
++ dev_kfree_skb_any(new_skb);
++}
++
++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
++/**
++ * igb_lro_receive - if able, queue skb into lro chain
++ * @q_vector: structure containing interrupt and ring information
++ * @new_skb: pointer to current skb being checked
++ *
++ * Checks whether the skb given is eligible for LRO and if that's
++ * fine chains it to the existing lro_skb based on flowid. If an LRO for
++ * the flow doesn't exist create one.
++ **/
++static void igb_lro_receive(struct igb_q_vector *q_vector,
++ struct sk_buff *new_skb)
++{
++ struct sk_buff *lro_skb;
++ struct igb_lro_list *lrolist = &q_vector->lrolist;
++ struct igb_lrohdr *lroh = igb_lro_hdr(new_skb);
++ __be32 saddr = lroh->iph.saddr;
++ __be32 daddr = lroh->iph.daddr;
++ __be32 tcp_ports = *(__be32 *)&lroh->th;
++ u16 data_len;
++#ifdef HAVE_VLAN_RX_REGISTER
++ u16 vid = IGB_CB(new_skb)->vid;
++#else
++ u16 vid = new_skb->vlan_tci;
++#endif
++
++ igb_lro_header_ok(new_skb);
++
++ /*
++ * we have a packet that might be eligible for LRO,
++ * so see if it matches anything we might expect
++ */
++ skb_queue_walk(&lrolist->active, lro_skb) {
++ if (*(__be32 *)&igb_lro_hdr(lro_skb)->th != tcp_ports ||
++ igb_lro_hdr(lro_skb)->iph.saddr != saddr ||
++ igb_lro_hdr(lro_skb)->iph.daddr != daddr)
++ continue;
++
++#ifdef HAVE_VLAN_RX_REGISTER
++ if (IGB_CB(lro_skb)->vid != vid)
++#else
++ if (lro_skb->vlan_tci != vid)
++#endif
++ continue;
++
++ /* out of order packet */
++ if (IGB_CB(lro_skb)->next_seq != IGB_CB(new_skb)->next_seq) {
++ igb_lro_flush(q_vector, lro_skb);
++ IGB_CB(new_skb)->mss = 0;
++ break;
++ }
++
++ /* TCP timestamp options have changed */
++ if (!IGB_CB(lro_skb)->tsecr != !IGB_CB(new_skb)->tsecr) {
++ igb_lro_flush(q_vector, lro_skb);
++ break;
++ }
++
++ /* make sure timestamp values are increasing */
++ if (IGB_CB(lro_skb)->tsecr &&
++ IGB_CB(lro_skb)->tsval > IGB_CB(new_skb)->tsval) {
++ igb_lro_flush(q_vector, lro_skb);
++ IGB_CB(new_skb)->mss = 0;
++ break;
++ }
++
++ data_len = IGB_CB(new_skb)->mss;
++
++ /* Check for all of the above below
++ * malformed header
++ * no tcp data
++ * resultant packet would be too large
++ * new skb is larger than our current mss
++ * data would remain in header
++ * we would consume more frags then the sk_buff contains
++ * ack sequence numbers changed
++ * window size has changed
++ */
++ if (data_len == 0 ||
++ data_len > IGB_CB(lro_skb)->mss ||
++ data_len > IGB_CB(lro_skb)->free ||
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ data_len != new_skb->data_len ||
++ skb_shinfo(new_skb)->nr_frags >=
++ (MAX_SKB_FRAGS - skb_shinfo(lro_skb)->nr_frags) ||
++#endif
++ igb_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq ||
++ igb_lro_hdr(lro_skb)->th.window != lroh->th.window) {
++ igb_lro_flush(q_vector, lro_skb);
++ break;
++ }
++
++ /* Remove IP and TCP header*/
++ skb_pull(new_skb, new_skb->len - data_len);
++
++ /* update timestamp and timestamp echo response */
++ IGB_CB(lro_skb)->tsval = IGB_CB(new_skb)->tsval;
++ IGB_CB(lro_skb)->tsecr = IGB_CB(new_skb)->tsecr;
++
++ /* update sequence and free space */
++ IGB_CB(lro_skb)->next_seq += data_len;
++ IGB_CB(lro_skb)->free -= data_len;
++
++ /* update append_cnt */
++ IGB_CB(lro_skb)->append_cnt++;
++
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ /* if header is empty pull pages into current skb */
++ igb_merge_frags(lro_skb, new_skb);
++#else
++ /* chain this new skb in frag_list */
++ igb_add_active_tail(lro_skb, new_skb);
++#endif
++
++ if ((data_len < IGB_CB(lro_skb)->mss) || lroh->th.psh ||
++ skb_shinfo(lro_skb)->nr_frags == MAX_SKB_FRAGS) {
++ igb_lro_hdr(lro_skb)->th.psh |= lroh->th.psh;
++ igb_lro_flush(q_vector, lro_skb);
++ }
++
++ lrolist->stats.coal++;
++ return;
++ }
++
++ if (IGB_CB(new_skb)->mss && !lroh->th.psh) {
++ /* if we are at capacity flush the tail */
++ if (skb_queue_len(&lrolist->active) >= IGB_LRO_MAX) {
++ lro_skb = skb_peek_tail(&lrolist->active);
++ if (lro_skb)
++ igb_lro_flush(q_vector, lro_skb);
++ }
++
++ /* update sequence and free space */
++ IGB_CB(new_skb)->next_seq += IGB_CB(new_skb)->mss;
++ IGB_CB(new_skb)->free = 65521 - new_skb->len;
+
+- return skb;
++ /* .. and insert at the front of the active list */
++ __skb_queue_head(&lrolist->active, new_skb);
++
++ lrolist->stats.coal++;
++ return;
++ }
++
++ /* packet not handled by any of the above, pass it to the stack */
++#ifdef HAVE_VLAN_RX_REGISTER
++ igb_receive_skb(q_vector, new_skb);
++#else
++ napi_gro_receive(&q_vector->napi, new_skb);
++#endif
+ }
+
+-static inline void igb_rx_checksum(struct igb_ring *ring,
++#endif /* IGB_NO_LRO */
++/**
++ * igb_process_skb_fields - Populate skb header fields from Rx descriptor
++ * @rx_ring: rx descriptor ring packet is being transacted on
++ * @rx_desc: pointer to the EOP Rx descriptor
++ * @skb: pointer to current skb being populated
++ *
++ * This function checks the ring, descriptor, and packet information in
++ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
++ * other fields within the skb.
++ **/
++static void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+ {
+- skb_checksum_none_assert(skb);
++ struct net_device *dev = rx_ring->netdev;
++ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
++ bool notype;
+
+- /* Ignore Checksum bit is set */
+- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
+- return;
++#ifdef NETIF_F_RXHASH
++ igb_rx_hash(rx_ring, rx_desc, skb);
+
+- /* Rx checksum disabled via ethtool */
+- if (!(ring->netdev->features & NETIF_F_RXCSUM))
+- return;
++#endif
++ igb_rx_checksum(rx_ring, rx_desc, skb);
+
+- /* TCP/UDP checksum error bit is set */
+- if (igb_test_staterr(rx_desc,
+- E1000_RXDEXT_STATERR_TCPE |
+- E1000_RXDEXT_STATERR_IPE)) {
+- /* work around errata with sctp packets where the TCPE aka
+- * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
+- * packets, (aka let the stack check the crc32c)
+- */
+- if (!((skb->len == 60) &&
+- test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
+- u64_stats_update_begin(&ring->rx_syncp);
+- ring->rx_stats.csum_err++;
+- u64_stats_update_end(&ring->rx_syncp);
+- }
+- /* let the stack verify checksum errors */
+- return;
++ /* update packet type stats */
++ switch (pkt_info & E1000_RXDADV_PKTTYPE_ILMASK) {
++ case E1000_RXDADV_PKTTYPE_IPV4:
++ rx_ring->pkt_stats.ipv4_packets++;
++ break;
++ case E1000_RXDADV_PKTTYPE_IPV4_EX:
++ rx_ring->pkt_stats.ipv4e_packets++;
++ break;
++ case E1000_RXDADV_PKTTYPE_IPV6:
++ rx_ring->pkt_stats.ipv6_packets++;
++ break;
++ case E1000_RXDADV_PKTTYPE_IPV6_EX:
++ rx_ring->pkt_stats.ipv6e_packets++;
++ break;
++ default:
++ notype = true;
++ break;
+ }
+- /* It must be a TCP or UDP packet with a valid checksum */
+- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
+- E1000_RXD_STAT_UDPCS))
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+- dev_dbg(ring->dev, "cksum success: bits %08X\n",
+- le32_to_cpu(rx_desc->wb.upper.status_error));
+-}
++ switch (pkt_info & E1000_RXDADV_PKTTYPE_TLMASK) {
++ case E1000_RXDADV_PKTTYPE_TCP:
++ rx_ring->pkt_stats.tcp_packets++;
++ break;
++ case E1000_RXDADV_PKTTYPE_UDP:
++ rx_ring->pkt_stats.udp_packets++;
++ break;
++ case E1000_RXDADV_PKTTYPE_SCTP:
++ rx_ring->pkt_stats.sctp_packets++;
++ break;
++ case E1000_RXDADV_PKTTYPE_NFS:
++ rx_ring->pkt_stats.nfs_packets++;
++ break;
++ case E1000_RXDADV_PKTTYPE_NONE:
++ if (notype)
++ rx_ring->pkt_stats.other_packets++;
++ break;
++ default:
++ break;
++ }
+
+-static inline void igb_rx_hash(struct igb_ring *ring,
+- union e1000_adv_rx_desc *rx_desc,
+- struct sk_buff *skb)
+-{
+- if (ring->netdev->features & NETIF_F_RXHASH)
+- skb_set_hash(skb,
+- le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+- PKT_HASH_TYPE_L3);
++#ifdef HAVE_PTP_1588_CLOCK
++ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
++ !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
++ igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
++
++#endif /* HAVE_PTP_1588_CLOCK */
++#ifdef NETIF_F_HW_VLAN_CTAG_RX
++ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
++#else
++ if ((dev->features & NETIF_F_HW_VLAN_RX) &&
++#endif
++ igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
++ u16 vid = 0;
++
++ if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
++ test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
++ vid = be16_to_cpu(rx_desc->wb.upper.vlan);
++ else
++ vid = le16_to_cpu(rx_desc->wb.upper.vlan);
++#ifdef HAVE_VLAN_RX_REGISTER
++ IGB_CB(skb)->vid = vid;
++ } else {
++ IGB_CB(skb)->vid = 0;
++#else
++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
++#endif
++ }
++
++ skb_record_rx_queue(skb, rx_ring->queue_index);
++
++ skb->protocol = eth_type_trans(skb, dev);
+ }
+
+ /**
+- * igb_is_non_eop - process handling of non-EOP buffers
+- * @rx_ring: Rx ring being processed
+- * @rx_desc: Rx descriptor for current buffer
+- * @skb: current socket buffer containing buffer in progress
+- *
+- * This function updates next to clean. If the buffer is an EOP buffer
+- * this function exits returning false, otherwise it will place the
+- * sk_buff in the next buffer to be chained and return true indicating
+- * that this is in fact a non-EOP buffer.
++ * igb_is_non_eop - process handling of non-EOP buffers
++ * @rx_ring: Rx ring being processed
++ * @rx_desc: Rx descriptor for current buffer
++ *
++ * This function updates next to clean. If the buffer is an EOP buffer
++ * this function exits returning false, otherwise it will place the
++ * sk_buff in the next buffer to be chained and return true indicating
++ * that this is in fact a non-EOP buffer.
+ **/
+ static bool igb_is_non_eop(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc)
+@@ -6756,200 +8196,134 @@
+ return true;
+ }
+
+-/**
+- * igb_get_headlen - determine size of header for LRO/GRO
+- * @data: pointer to the start of the headers
+- * @max_len: total length of section to find headers in
+- *
+- * This function is meant to determine the length of headers that will
+- * be recognized by hardware for LRO, and GRO offloads. The main
+- * motivation of doing this is to only perform one pull for IPv4 TCP
+- * packets so that we can do basic things like calculating the gso_size
+- * based on the average data per packet.
+- **/
+-static unsigned int igb_get_headlen(unsigned char *data,
+- unsigned int max_len)
+-{
+- union {
+- unsigned char *network;
+- /* l2 headers */
+- struct ethhdr *eth;
+- struct vlan_hdr *vlan;
+- /* l3 headers */
+- struct iphdr *ipv4;
+- struct ipv6hdr *ipv6;
+- } hdr;
+- __be16 protocol;
+- u8 nexthdr = 0; /* default to not TCP */
+- u8 hlen;
+-
+- /* this should never happen, but better safe than sorry */
+- if (max_len < ETH_HLEN)
+- return max_len;
+-
+- /* initialize network frame pointer */
+- hdr.network = data;
+-
+- /* set first protocol and move network header forward */
+- protocol = hdr.eth->h_proto;
+- hdr.network += ETH_HLEN;
+-
+- /* handle any vlan tag if present */
+- if (protocol == htons(ETH_P_8021Q)) {
+- if ((hdr.network - data) > (max_len - VLAN_HLEN))
+- return max_len;
+-
+- protocol = hdr.vlan->h_vlan_encapsulated_proto;
+- hdr.network += VLAN_HLEN;
+- }
+-
+- /* handle L3 protocols */
+- if (protocol == htons(ETH_P_IP)) {
+- if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+- return max_len;
+-
+- /* access ihl as a u8 to avoid unaligned access on ia64 */
+- hlen = (hdr.network[0] & 0x0F) << 2;
+-
+- /* verify hlen meets minimum size requirements */
+- if (hlen < sizeof(struct iphdr))
+- return hdr.network - data;
+-
+- /* record next protocol if header is present */
+- if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
+- nexthdr = hdr.ipv4->protocol;
+- } else if (protocol == htons(ETH_P_IPV6)) {
+- if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+- return max_len;
+-
+- /* record next protocol */
+- nexthdr = hdr.ipv6->nexthdr;
+- hlen = sizeof(struct ipv6hdr);
+- } else {
+- return hdr.network - data;
+- }
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++/* igb_clean_rx_irq -- * legacy */
++static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
++{
++ struct igb_ring *rx_ring = q_vector->rx.ring;
++ unsigned int total_bytes = 0, total_packets = 0;
++ u16 cleaned_count = igb_desc_unused(rx_ring);
+
+- /* relocate pointer to start of L4 header */
+- hdr.network += hlen;
++ do {
++ struct igb_rx_buffer *rx_buffer;
++ union e1000_adv_rx_desc *rx_desc;
++ struct sk_buff *skb;
++ u16 ntc;
+
+- /* finally sort out TCP */
+- if (nexthdr == IPPROTO_TCP) {
+- if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+- return max_len;
++ /* return some buffers to hardware, one at a time is too slow */
++ if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
++ igb_alloc_rx_buffers(rx_ring, cleaned_count);
++ cleaned_count = 0;
++ }
+
+- /* access doff as a u8 to avoid unaligned access on ia64 */
+- hlen = (hdr.network[12] & 0xF0) >> 2;
++ ntc = rx_ring->next_to_clean;
++ rx_desc = IGB_RX_DESC(rx_ring, ntc);
++ rx_buffer = &rx_ring->rx_buffer_info[ntc];
+
+- /* verify hlen meets minimum size requirements */
+- if (hlen < sizeof(struct tcphdr))
+- return hdr.network - data;
++ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
++ break;
+
+- hdr.network += hlen;
+- } else if (nexthdr == IPPROTO_UDP) {
+- if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+- return max_len;
++ /*
++ * This memory barrier is needed to keep us from reading
++ * any other fields out of the rx_desc until we know the
++ * RXD_STAT_DD bit is set
++ */
++ rmb();
+
+- hdr.network += sizeof(struct udphdr);
+- }
++ skb = rx_buffer->skb;
+
+- /* If everything has gone correctly hdr.network should be the
+- * data section of the packet and will be the end of the header.
+- * If not then it probably represents the end of the last recognized
+- * header.
+- */
+- if ((hdr.network - data) < max_len)
+- return hdr.network - data;
+- else
+- return max_len;
+-}
++ prefetch(skb->data);
+
+-/**
+- * igb_pull_tail - igb specific version of skb_pull_tail
+- * @rx_ring: rx descriptor ring packet is being transacted on
+- * @rx_desc: pointer to the EOP Rx descriptor
+- * @skb: pointer to current skb being adjusted
+- *
+- * This function is an igb specific version of __pskb_pull_tail. The
+- * main difference between this version and the original function is that
+- * this function can make several assumptions about the state of things
+- * that allow for significant optimizations versus the standard function.
+- * As a result we can do things like drop a frag and maintain an accurate
+- * truesize for the skb.
+- */
+-static void igb_pull_tail(struct igb_ring *rx_ring,
+- union e1000_adv_rx_desc *rx_desc,
+- struct sk_buff *skb)
+-{
+- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+- unsigned char *va;
+- unsigned int pull_len;
++ /* pull the header of the skb in */
++ __skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length));
+
+- /* it is valid to use page_address instead of kmap since we are
+- * working with pages allocated out of the lomem pool per
+- * alloc_page(GFP_ATOMIC)
+- */
+- va = skb_frag_address(frag);
++ /* clear skb reference in buffer info structure */
++ rx_buffer->skb = NULL;
+
+- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+- /* retrieve timestamp from buffer */
+- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
++ cleaned_count++;
+
+- /* update pointers to remove timestamp header */
+- skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
+- frag->page_offset += IGB_TS_HDR_LEN;
+- skb->data_len -= IGB_TS_HDR_LEN;
+- skb->len -= IGB_TS_HDR_LEN;
++ BUG_ON(igb_is_non_eop(rx_ring, rx_desc));
+
+- /* move va to start of packet data */
+- va += IGB_TS_HDR_LEN;
+- }
++ dma_unmap_single(rx_ring->dev, rx_buffer->dma,
++ rx_ring->rx_buffer_len,
++ DMA_FROM_DEVICE);
++ rx_buffer->dma = 0;
+
+- /* we need the header to contain the greater of either ETH_HLEN or
+- * 60 bytes if the skb->len is less than 60 for skb_pad.
+- */
+- pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
++ if (igb_test_staterr(rx_desc,
++ E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
++ dev_kfree_skb_any(skb);
++ continue;
++ }
+
+- /* align pull length to size of long to optimize memcpy performance */
+- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
++ total_bytes += skb->len;
+
+- /* update all of the pointers */
+- skb_frag_size_sub(frag, pull_len);
+- frag->page_offset += pull_len;
+- skb->data_len -= pull_len;
+- skb->tail += pull_len;
++ /* populate checksum, timestamp, VLAN, and protocol */
++ igb_process_skb_fields(rx_ring, rx_desc, skb);
++
++#ifndef IGB_NO_LRO
++ if (igb_can_lro(rx_ring, rx_desc, skb))
++ igb_lro_receive(q_vector, skb);
++ else
++#endif
++#ifdef HAVE_VLAN_RX_REGISTER
++ igb_receive_skb(q_vector, skb);
++#else
++ napi_gro_receive(&q_vector->napi, skb);
++#endif
++
++#ifndef NETIF_F_GRO
++ netdev_ring(rx_ring)->last_rx = jiffies;
++
++#endif
++ /* update budget accounting */
++ total_packets++;
++ } while (likely(total_packets < budget));
++
++ rx_ring->rx_stats.packets += total_packets;
++ rx_ring->rx_stats.bytes += total_bytes;
++ q_vector->rx.total_packets += total_packets;
++ q_vector->rx.total_bytes += total_bytes;
++
++ if (cleaned_count)
++ igb_alloc_rx_buffers(rx_ring, cleaned_count);
++
++#ifndef IGB_NO_LRO
++ igb_lro_flush_all(q_vector);
++
++#endif /* IGB_NO_LRO */
++ return (total_packets < budget);
+ }
++#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+
+ /**
+- * igb_cleanup_headers - Correct corrupted or empty headers
+- * @rx_ring: rx descriptor ring packet is being transacted on
+- * @rx_desc: pointer to the EOP Rx descriptor
+- * @skb: pointer to current skb being fixed
++ * igb_cleanup_headers - Correct corrupted or empty headers
++ * @rx_ring: rx descriptor ring packet is being transacted on
++ * @rx_desc: pointer to the EOP Rx descriptor
++ * @skb: pointer to current skb being fixed
+ *
+- * Address the case where we are pulling data in on pages only
+- * and as such no data is present in the skb header.
++ * Address the case where we are pulling data in on pages only
++ * and as such no data is present in the skb header.
+ *
+- * In addition if skb is not at least 60 bytes we need to pad it so that
+- * it is large enough to qualify as a valid Ethernet frame.
++ * In addition if skb is not at least 60 bytes we need to pad it so that
++ * it is large enough to qualify as a valid Ethernet frame.
+ *
+- * Returns true if an error was encountered and skb was freed.
++ * Returns true if an error was encountered and skb was freed.
+ **/
+ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+ {
++
+ if (unlikely((igb_test_staterr(rx_desc,
+ E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+ struct net_device *netdev = rx_ring->netdev;
++
+ if (!(netdev->features & NETIF_F_RXALL)) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+ }
+
+- /* place header in linear portion of buffer */
+- if (skb_is_nonlinear(skb))
+- igb_pull_tail(rx_ring, rx_desc, skb);
+-
+ /* if skb_pad returns an error the skb was freed */
+ if (unlikely(skb->len < 60)) {
+ int pad_len = 60 - skb->len;
+@@ -6962,56 +8336,15 @@
+ return false;
+ }
+
+-/**
+- * igb_process_skb_fields - Populate skb header fields from Rx descriptor
+- * @rx_ring: rx descriptor ring packet is being transacted on
+- * @rx_desc: pointer to the EOP Rx descriptor
+- * @skb: pointer to current skb being populated
+- *
+- * This function checks the ring, descriptor, and packet information in
+- * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+- * other fields within the skb.
+- **/
+-static void igb_process_skb_fields(struct igb_ring *rx_ring,
+- union e1000_adv_rx_desc *rx_desc,
+- struct sk_buff *skb)
+-{
+- struct net_device *dev = rx_ring->netdev;
+-
+- igb_rx_hash(rx_ring, rx_desc, skb);
+-
+- igb_rx_checksum(rx_ring, rx_desc, skb);
+-
+- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
+- !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
+- igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
+-
+- if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+- igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
+- u16 vid;
+-
+- if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
+- test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
+- vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+- else
+- vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+-
+- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+- }
+-
+- skb_record_rx_queue(skb, rx_ring->queue_index);
+-
+- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+-}
+-
+-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
++/* igb_clean_rx_irq -- * packet split */
++static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
+ {
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ struct sk_buff *skb = rx_ring->skb;
+ unsigned int total_bytes = 0, total_packets = 0;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
+
+- while (likely(total_packets < budget)) {
++ do {
+ union e1000_adv_rx_desc *rx_desc;
+
+ /* return some buffers to hardware, one at a time is too slow */
+@@ -7025,7 +8358,8 @@
+ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+ break;
+
+- /* This memory barrier is needed to keep us from reading
++ /*
++ * This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+@@ -7056,31 +8390,89 @@
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+- napi_gro_receive(&q_vector->napi, skb);
++#ifndef IGB_NO_LRO
++ if (igb_can_lro(rx_ring, rx_desc, skb))
++ igb_lro_receive(q_vector, skb);
++ else
++#endif
++#ifdef HAVE_VLAN_RX_REGISTER
++ igb_receive_skb(q_vector, skb);
++#else
++ napi_gro_receive(&q_vector->napi, skb);
++#endif
++#ifndef NETIF_F_GRO
++
++ netdev_ring(rx_ring)->last_rx = jiffies;
++#endif
+
+ /* reset skb pointer */
+ skb = NULL;
+
+ /* update budget accounting */
+ total_packets++;
+- }
++ } while (likely(total_packets < budget));
+
+ /* place incomplete frames back on ring for completion */
+ rx_ring->skb = skb;
+
+- u64_stats_update_begin(&rx_ring->rx_syncp);
+ rx_ring->rx_stats.packets += total_packets;
+ rx_ring->rx_stats.bytes += total_bytes;
+- u64_stats_update_end(&rx_ring->rx_syncp);
+ q_vector->rx.total_packets += total_packets;
+ q_vector->rx.total_bytes += total_bytes;
+
+ if (cleaned_count)
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+
+- return total_packets < budget;
++#ifndef IGB_NO_LRO
++ igb_lro_flush_all(q_vector);
++
++#endif /* IGB_NO_LRO */
++ return (total_packets < budget);
++}
++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
++
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
++ struct igb_rx_buffer *bi)
++{
++ struct sk_buff *skb = bi->skb;
++ dma_addr_t dma = bi->dma;
++
++ if (dma)
++ return true;
++
++ if (likely(!skb)) {
++ skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring),
++ rx_ring->rx_buffer_len);
++ bi->skb = skb;
++ if (!skb) {
++ rx_ring->rx_stats.alloc_failed++;
++ return false;
++ }
++
++ /* initialize skb for ring */
++ skb_record_rx_queue(skb, ring_queue_index(rx_ring));
++ }
++
++ dma = dma_map_single(rx_ring->dev, skb->data,
++ rx_ring->rx_buffer_len, DMA_FROM_DEVICE);
++
++ /* if mapping failed free memory back to system since
++ * there isn't much point in holding memory we can't use
++ */
++ if (dma_mapping_error(rx_ring->dev, dma)) {
++ dev_kfree_skb_any(skb);
++ bi->skb = NULL;
++
++ rx_ring->rx_stats.alloc_failed++;
++ return false;
++ }
++
++ bi->dma = dma;
++ return true;
+ }
+
++#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *bi)
+ {
+@@ -7092,7 +8484,7 @@
+ return true;
+
+ /* alloc new page for storage */
+- page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
++ page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+@@ -7101,7 +8493,8 @@
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+
+- /* if mapping failed free memory back to system since
++ /*
++ * if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+@@ -7118,9 +8511,10 @@
+ return true;
+ }
+
++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+ /**
+- * igb_alloc_rx_buffers - Replace used receive buffers; packet split
+- * @adapter: address of board private structure
++ * igb_alloc_rx_buffers - Replace used receive buffers; packet split
++ * @adapter: address of board private structure
+ **/
+ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
+ {
+@@ -7137,13 +8531,22 @@
+ i -= rx_ring->count;
+
+ do {
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ if (!igb_alloc_mapped_skb(rx_ring, bi))
++#else
+ if (!igb_alloc_mapped_page(rx_ring, bi))
++#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+ break;
+
+- /* Refresh the desc even if buffer_addrs didn't change
++ /*
++ * Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
++#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
++ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
++#else
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
++#endif
+
+ rx_desc++;
+ bi++;
+@@ -7166,10 +8569,13 @@
+ /* record the next descriptor to use */
+ rx_ring->next_to_use = i;
+
++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+- /* Force memory writes to complete before letting h/w
++#endif
++ /*
++ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+@@ -7179,6 +8585,7 @@
+ }
+ }
+
++#ifdef SIOCGMIIPHY
+ /**
+ * igb_mii_ioctl -
+ * @netdev:
+@@ -7198,17 +8605,20 @@
+ data->phy_id = adapter->hw.phy.addr;
+ break;
+ case SIOCGMIIREG:
+- if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+- &data->val_out))
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (igb_e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
++ &data->val_out))
+ return -EIO;
+ break;
+ case SIOCSMIIREG:
+ default:
+ return -EOPNOTSUPP;
+ }
+- return 0;
++ return E1000_SUCCESS;
+ }
+
++#endif
+ /**
+ * igb_ioctl -
+ * @netdev:
+@@ -7218,156 +8628,295 @@
+ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+ {
+ switch (cmd) {
++#ifdef SIOCGMIIPHY
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return igb_mii_ioctl(netdev, ifr, cmd);
++#endif
++#ifdef HAVE_PTP_1588_CLOCK
++#ifdef SIOCGHWTSTAMP
+ case SIOCGHWTSTAMP:
+ return igb_ptp_get_ts_config(netdev, ifr);
++#endif
+ case SIOCSHWTSTAMP:
+ return igb_ptp_set_ts_config(netdev, ifr);
++#endif /* HAVE_PTP_1588_CLOCK */
++#ifdef ETHTOOL_OPS_COMPAT
++ case SIOCETHTOOL:
++ return ethtool_ioctl(ifr);
++#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+-void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
++void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+ {
+ struct igb_adapter *adapter = hw->back;
+
+ pci_read_config_word(adapter->pdev, reg, value);
+ }
+
+-void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
++void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+ {
+ struct igb_adapter *adapter = hw->back;
+
+ pci_write_config_word(adapter->pdev, reg, *value);
+ }
+
+-s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
++s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+ {
+ struct igb_adapter *adapter = hw->back;
++ u16 cap_offset;
+
+- if (pcie_capability_read_word(adapter->pdev, reg, value))
++ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
++ if (!cap_offset)
+ return -E1000_ERR_CONFIG;
+
+- return 0;
++ pci_read_config_word(adapter->pdev, cap_offset + reg, value);
++
++ return E1000_SUCCESS;
+ }
+
+-s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
++s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+ {
+ struct igb_adapter *adapter = hw->back;
++ u16 cap_offset;
+
+- if (pcie_capability_write_word(adapter->pdev, reg, *value))
++ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
++ if (!cap_offset)
+ return -E1000_ERR_CONFIG;
+
+- return 0;
++ pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
++
++ return E1000_SUCCESS;
+ }
+
+-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
++#ifdef HAVE_VLAN_RX_REGISTER
++static void igb_vlan_mode(struct net_device *netdev, struct vlan_group *vlgrp)
++#else
++void igb_vlan_mode(struct net_device *netdev, u32 features)
++#endif /* HAVE_VLAN_RX_REGISTER */
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, rctl;
+- bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
++ bool enable;
++ int i;
++#ifdef HAVE_VLAN_RX_REGISTER
++ enable = !!vlgrp;
++ igb_irq_disable(adapter);
++
++ adapter->vlgrp = vlgrp;
++
++ if (!test_bit(__IGB_DOWN, &adapter->state))
++ igb_irq_enable(adapter);
++#else
++#ifdef NETIF_F_HW_VLAN_CTAG_RX
++ enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
++#else
++ enable = !!(features & NETIF_F_HW_VLAN_RX);
++#endif /* NETIF_F_HW_VLAN_CTAG_RX */
++#endif /* HAVE_VLAN_RX_REGISTER */
+
+ if (enable) {
+ /* enable VLAN tag insert/strip */
+- ctrl = rd32(E1000_CTRL);
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_VME;
+- wr32(E1000_CTRL, ctrl);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Disable CFI check */
+- rctl = rd32(E1000_RCTL);
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= ~E1000_RCTL_CFIEN;
+- wr32(E1000_RCTL, rctl);
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ } else {
+ /* disable VLAN tag insert/strip */
+- ctrl = rd32(E1000_CTRL);
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl &= ~E1000_CTRL_VME;
+- wr32(E1000_CTRL, ctrl);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
++ }
++
++#ifndef CONFIG_IGB_VMDQ_NETDEV
++ for (i = 0; i < adapter->vmdq_pools; i++) {
++ igb_set_vf_vlan_strip(adapter,
++ adapter->vfs_allocated_count + i,
++ enable);
++ }
++
++#else
++ igb_set_vf_vlan_strip(adapter,
++ adapter->vfs_allocated_count,
++ enable);
++
++ for (i = 1; i < adapter->vmdq_pools; i++) {
++#ifdef HAVE_VLAN_RX_REGISTER
++ struct igb_vmdq_adapter *vadapter;
++
++ vadapter = netdev_priv(adapter->vmdq_netdev[i-1]);
++
++ enable = !!vadapter->vlgrp;
++#else
++ struct net_device *vnetdev;
++
++ vnetdev = adapter->vmdq_netdev[i-1];
++#ifdef NETIF_F_HW_VLAN_CTAG_RX
++ enable = !!(vnetdev->features & NETIF_F_HW_VLAN_CTAG_RX);
++#else
++ enable = !!(vnetdev->features & NETIF_F_HW_VLAN_RX);
++#endif /* NETIF_F_HW_VLAN_CTAG_RX */
++#endif /* HAVE_VLAN_RX_REGISTER */
++ igb_set_vf_vlan_strip(adapter,
++ adapter->vfs_allocated_count + i,
++ enable);
+ }
+
++#endif /* CONFIG_IGB_VMDQ_NETDEV */
+ igb_rlpml_set(adapter);
+ }
+
++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
++#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ static int igb_vlan_rx_add_vid(struct net_device *netdev,
+- __be16 proto, u16 vid)
++ __always_unused __be16 proto, u16 vid)
++#else
++static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
++#endif /* NETIF_F_HW_VLAN_CTAG_RX */
++#else
++static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
++#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+- struct e1000_hw *hw = &adapter->hw;
+ int pf_id = adapter->vfs_allocated_count;
+
+ /* attempt to add filter to vlvf array */
+- igb_vlvf_set(adapter, vid, true, pf_id);
++ igb_vlvf_set(adapter, vid, TRUE, pf_id);
+
+ /* add the filter since PF can receive vlans w/o entry in vlvf */
+- igb_vfta_set(hw, vid, true);
++ igb_vfta_set(adapter, vid, TRUE);
++#ifndef HAVE_NETDEV_VLAN_FEATURES
+
+- set_bit(vid, adapter->active_vlans);
++ /* Copy feature flags from netdev to the vlan netdev for this vid.
++ * This allows things like TSO to bubble down to our vlan device.
++ * There is no need to update netdev for vlan 0 (DCB), since it
++ * wouldn't has v_netdev.
++ */
++ if (adapter->vlgrp) {
++ struct vlan_group *vlgrp = adapter->vlgrp;
++ struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid);
+
++ if (v_netdev) {
++ v_netdev->features |= netdev->features;
++ vlan_group_set_device(vlgrp, vid, v_netdev);
++ }
++ }
++#endif /* HAVE_NETDEV_VLAN_FEATURES */
++#ifndef HAVE_VLAN_RX_REGISTER
++
++ set_bit(vid, adapter->active_vlans);
++#endif /* HAVE_VLAN_RX_REGISTER */
++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+ return 0;
++#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
+ }
+
++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
++#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ static int igb_vlan_rx_kill_vid(struct net_device *netdev,
+- __be16 proto, u16 vid)
++ __always_unused __be16 proto, u16 vid)
++#else
++static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
++#endif /* NETIF_F_HW_VLAN_CTAG_RX */
++#else
++static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
++#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+- struct e1000_hw *hw = &adapter->hw;
+ int pf_id = adapter->vfs_allocated_count;
+ s32 err;
+
++#ifdef HAVE_VLAN_RX_REGISTER
++ igb_irq_disable(adapter);
++
++ vlan_group_set_device(adapter->vlgrp, vid, NULL);
++
++ if (!test_bit(__IGB_DOWN, &adapter->state))
++ igb_irq_enable(adapter);
++
++#endif /* HAVE_VLAN_RX_REGISTER */
+ /* remove vlan from VLVF table array */
+- err = igb_vlvf_set(adapter, vid, false, pf_id);
++ err = igb_vlvf_set(adapter, vid, FALSE, pf_id);
+
+ /* if vid was not present in VLVF just remove it from table */
+ if (err)
+- igb_vfta_set(hw, vid, false);
++ igb_vfta_set(adapter, vid, FALSE);
++#ifndef HAVE_VLAN_RX_REGISTER
+
+ clear_bit(vid, adapter->active_vlans);
+-
++#endif /* HAVE_VLAN_RX_REGISTER */
++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+ return 0;
++#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
+ }
+
+ static void igb_restore_vlan(struct igb_adapter *adapter)
+ {
++#ifdef HAVE_VLAN_RX_REGISTER
++ igb_vlan_mode(adapter->netdev, adapter->vlgrp);
++
++ if (adapter->vlgrp) {
++ u16 vid;
++
++ for (vid = 0; vid < VLAN_N_VID; vid++) {
++ if (!vlan_group_get_device(adapter->vlgrp, vid))
++ continue;
++#ifdef NETIF_F_HW_VLAN_CTAG_RX
++ igb_vlan_rx_add_vid(adapter->netdev,
++ htons(ETH_P_8021Q), vid);
++#else
++ igb_vlan_rx_add_vid(adapter->netdev, vid);
++#endif /* NETIF_F_HW_VLAN_CTAG_RX */
++ }
++ }
++#else
+ u16 vid;
+
+ igb_vlan_mode(adapter->netdev, adapter->netdev->features);
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+- igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
++#ifdef NETIF_F_HW_VLAN_CTAG_RX
++ igb_vlan_rx_add_vid(adapter->netdev,
++ htons(ETH_P_8021Q), vid);
++#else
++ igb_vlan_rx_add_vid(adapter->netdev, vid);
++#endif /* NETIF_F_HW_VLAN_CTAG_RX */
++#endif /* HAVE_VLAN_RX_REGISTER */
+ }
+
+-int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
++int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
+ {
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+
+ mac->autoneg = 0;
+
+- /* Make sure dplx is at most 1 bit and lsb of speed is not set
+- * for the switch() below to work
+- */
+- if ((spd & 1) || (dplx & ~1))
+- goto err_inval;
+-
+- /* Fiber NIC's only allow 1000 gbps Full duplex
+- * and 100Mbps Full duplex for 100baseFx sfp
++ /* SerDes device's does not support 10Mbps Full/duplex
++ * and 100Mbps Half duplex
+ */
+ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
+- switch (spd + dplx) {
++ switch (spddplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ case SPEED_10 + DUPLEX_FULL:
+ case SPEED_100 + DUPLEX_HALF:
+- goto err_inval;
++ dev_err(pci_dev_to_dev(pdev),
++ "Unsupported Speed/Duplex configuration\n");
++ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
+- switch (spd + dplx) {
++ switch (spddplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_10_HALF;
+ break;
+@@ -7386,17 +8935,52 @@
+ break;
+ case SPEED_1000 + DUPLEX_HALF: /* not supported */
+ default:
+- goto err_inval;
++ dev_err(pci_dev_to_dev(pdev), "Unsupported Speed/Duplex configuration\n");
++ return -EINVAL;
+ }
+
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
+ return 0;
++}
+
+-err_inval:
+- dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
+- return -EINVAL;
++/* This function should only be called if RTNL lock is held */
++int igb_setup_queues(struct igb_adapter *adapter)
++{
++ struct net_device *dev = adapter->netdev;
++ int err;
++
++ if (adapter->rss_queues == adapter->num_rx_queues) {
++ if (adapter->tss_queues) {
++ if (adapter->tss_queues == adapter->num_tx_queues)
++ return 0;
++ } else if (adapter->vfs_allocated_count ||
++ adapter->rss_queues == adapter->num_tx_queues) {
++ return 0;
++ }
++ }
++
++ /*
++ * Hardware has to reinitialize queues and interrupts to
++ * match the new configuration. Unfortunately, the hardware
++ * is not flexible enough to do this dynamically.
++ */
++ if (netif_running(dev))
++ igb_close(dev);
++
++ igb_clear_interrupt_scheme(adapter);
++
++ err = igb_init_interrupt_scheme(adapter, true);
++ if (err) {
++ dev_close(dev);
++ return err;
++ }
++
++ if (netif_running(dev))
++ err = igb_open(dev);
++
++ return err;
+ }
+
+ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
+@@ -7413,6 +8997,10 @@
+
+ netif_device_detach(netdev);
+
++ status = E1000_READ_REG(hw, E1000_STATUS);
++ if (status & E1000_STATUS_LU)
++ wufc &= ~E1000_WUFC_LNKC;
++
+ if (netif_running(netdev))
+ __igb_close(netdev, true);
+
+@@ -7424,37 +9012,31 @@
+ return retval;
+ #endif
+
+- status = rd32(E1000_STATUS);
+- if (status & E1000_STATUS_LU)
+- wufc &= ~E1000_WUFC_LNKC;
+-
+ if (wufc) {
+ igb_setup_rctl(adapter);
+ igb_set_rx_mode(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if (wufc & E1000_WUFC_MC) {
+- rctl = rd32(E1000_RCTL);
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_MPE;
+- wr32(E1000_RCTL, rctl);
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ }
+
+- ctrl = rd32(E1000_CTRL);
+- /* advertise wake from D3Cold */
+- #define E1000_CTRL_ADVD3WUC 0x00100000
++ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ /* phy power management enable */
+ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+ ctrl |= E1000_CTRL_ADVD3WUC;
+- wr32(E1000_CTRL, ctrl);
++ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Allow time for pending master requests to run */
+- igb_disable_pcie_master(hw);
++ e1000_disable_pcie_master(hw);
+
+- wr32(E1000_WUC, E1000_WUC_PME_EN);
+- wr32(E1000_WUFC, wufc);
++ E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PME_EN);
++ E1000_WRITE_REG(hw, E1000_WUFC, wufc);
+ } else {
+- wr32(E1000_WUC, 0);
+- wr32(E1000_WUFC, 0);
++ E1000_WRITE_REG(hw, E1000_WUC, 0);
++ E1000_WRITE_REG(hw, E1000_WUFC, 0);
+ }
+
+ *enable_wake = wufc || adapter->en_mng_pt;
+@@ -7474,12 +9056,17 @@
+ }
+
+ #ifdef CONFIG_PM
+-#ifdef CONFIG_PM_SLEEP
++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+ static int igb_suspend(struct device *dev)
++#else
++static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+ {
++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
++ struct pci_dev *pdev = to_pci_dev(dev);
++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+ int retval;
+ bool wake;
+- struct pci_dev *pdev = to_pci_dev(dev);
+
+ retval = __igb_shutdown(pdev, &wake, 0);
+ if (retval)
+@@ -7494,11 +9081,16 @@
+
+ return 0;
+ }
+-#endif /* CONFIG_PM_SLEEP */
+
++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+ static int igb_resume(struct device *dev)
++#else
++static int igb_resume(struct pci_dev *pdev)
++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+ {
++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+ struct pci_dev *pdev = to_pci_dev(dev);
++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+@@ -7510,7 +9102,7 @@
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+- dev_err(&pdev->dev,
++ dev_err(pci_dev_to_dev(pdev),
+ "igb: Cannot enable PCI device from suspend\n");
+ return err;
+ }
+@@ -7520,18 +9112,18 @@
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
++ dev_err(pci_dev_to_dev(pdev),
++ "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ igb_reset(adapter);
+
+- /* let the f/w know that the h/w is now under the control of the
+- * driver.
++ /* let the f/w know that the h/w is now under the control of the driver.
+ */
+ igb_get_hw_control(adapter);
+
+- wr32(E1000_WUS, ~0);
++ E1000_WRITE_REG(hw, E1000_WUS, ~0);
+
+ if (netdev->flags & IFF_UP) {
+ rtnl_lock();
+@@ -7542,10 +9134,12 @@
+ }
+
+ netif_device_attach(netdev);
++
+ return 0;
+ }
+
+ #ifdef CONFIG_PM_RUNTIME
++#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+ static int igb_runtime_idle(struct device *dev)
+ {
+ struct pci_dev *pdev = to_pci_dev(dev);
+@@ -7582,91 +9176,51 @@
+ {
+ return igb_resume(dev);
+ }
++#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+ #endif /* CONFIG_PM_RUNTIME */
+-#endif
++#endif /* CONFIG_PM */
+
+-static void igb_shutdown(struct pci_dev *pdev)
++#ifdef USE_REBOOT_NOTIFIER
++/* only want to do this for 2.4 kernels? */
++static int igb_notify_reboot(struct notifier_block *nb, unsigned long event,
++ void *p)
+ {
++ struct pci_dev *pdev = NULL;
+ bool wake;
+
+- __igb_shutdown(pdev, &wake, 0);
+-
+- if (system_state == SYSTEM_POWER_OFF) {
+- pci_wake_from_d3(pdev, wake);
+- pci_set_power_state(pdev, PCI_D3hot);
++ switch (event) {
++ case SYS_DOWN:
++ case SYS_HALT:
++ case SYS_POWER_OFF:
++ while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
++ if (pci_dev_driver(pdev) == &igb_driver) {
++ __igb_shutdown(pdev, &wake, 0);
++ if (event == SYS_POWER_OFF) {
++ pci_wake_from_d3(pdev, wake);
++ pci_set_power_state(pdev, PCI_D3hot);
++ }
++ }
++ }
+ }
++ return NOTIFY_DONE;
+ }
+-
+-#ifdef CONFIG_PCI_IOV
+-static int igb_sriov_reinit(struct pci_dev *dev)
++#else
++static void igb_shutdown(struct pci_dev *pdev)
+ {
+- struct net_device *netdev = pci_get_drvdata(dev);
+- struct igb_adapter *adapter = netdev_priv(netdev);
+- struct pci_dev *pdev = adapter->pdev;
++ bool wake = false;
+
+- rtnl_lock();
+-
+- if (netif_running(netdev))
+- igb_close(netdev);
+- else
+- igb_reset(adapter);
+-
+- igb_clear_interrupt_scheme(adapter);
+-
+- igb_init_queue_configuration(adapter);
++ __igb_shutdown(pdev, &wake, 0);
+
+- if (igb_init_interrupt_scheme(adapter, true)) {
+- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+- return -ENOMEM;
++ if (system_state == SYSTEM_POWER_OFF) {
++ pci_wake_from_d3(pdev, wake);
++ pci_set_power_state(pdev, PCI_D3hot);
+ }
+-
+- if (netif_running(netdev))
+- igb_open(netdev);
+-
+- rtnl_unlock();
+-
+- return 0;
+-}
+-
+-static int igb_pci_disable_sriov(struct pci_dev *dev)
+-{
+- int err = igb_disable_sriov(dev);
+-
+- if (!err)
+- err = igb_sriov_reinit(dev);
+-
+- return err;
+-}
+-
+-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
+-{
+- int err = igb_enable_sriov(dev, num_vfs);
+-
+- if (err)
+- goto out;
+-
+- err = igb_sriov_reinit(dev);
+- if (!err)
+- return num_vfs;
+-
+-out:
+- return err;
+-}
+-
+-#endif
+-static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+-{
+-#ifdef CONFIG_PCI_IOV
+- if (num_vfs == 0)
+- return igb_pci_disable_sriov(dev);
+- else
+- return igb_pci_enable_sriov(dev, num_vfs);
+-#endif
+- return 0;
+ }
++#endif /* USE_REBOOT_NOTIFIER */
+
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+-/* Polling 'interrupt' - used by things like netconsole to send skbs
++/*
++ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+@@ -7679,8 +9233,8 @@
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ q_vector = adapter->q_vector[i];
+- if (adapter->flags & IGB_FLAG_HAS_MSIX)
+- wr32(E1000_EIMC, q_vector->eims_value);
++ if (adapter->msix_entries)
++ E1000_WRITE_REG(hw, E1000_EIMC, q_vector->eims_value);
+ else
+ igb_irq_disable(adapter);
+ napi_schedule(&q_vector->napi);
+@@ -7688,20 +9242,98 @@
+ }
+ #endif /* CONFIG_NET_POLL_CONTROLLER */
+
++#ifdef HAVE_PCI_ERS
++#define E1000_DEV_ID_82576_VF 0x10CA
+ /**
+- * igb_io_error_detected - called when PCI error is detected
+- * @pdev: Pointer to PCI device
+- * @state: The current pci connection state
++ * igb_io_error_detected - called when PCI error is detected
++ * @pdev: Pointer to PCI device
++ * @state: The current pci connection state
+ *
+- * This function is called after a PCI bus error affecting
+- * this device has been detected.
+- **/
++ * This function is called after a PCI bus error affecting
++ * this device has been detected.
++ */
+ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+ {
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
++#ifdef CONFIG_PCI_IOV
++ struct pci_dev *bdev, *vfdev;
++ u32 dw0, dw1, dw2, dw3;
++ int vf, pos;
++ u16 req_id, pf_func;
++
++ if (!(adapter->flags & IGB_FLAG_DETECT_BAD_DMA))
++ goto skip_bad_vf_detection;
++
++ bdev = pdev->bus->self;
++ while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
++ bdev = bdev->bus->self;
++
++ if (!bdev)
++ goto skip_bad_vf_detection;
++
++ pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
++ if (!pos)
++ goto skip_bad_vf_detection;
++
++ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
++ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
++ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
++ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
++
++ req_id = dw1 >> 16;
++ /* On the 82576 if bit 7 of the requestor ID is set then it's a VF */
++ if (!(req_id & 0x0080))
++ goto skip_bad_vf_detection;
++
++ pf_func = req_id & 0x01;
++ if ((pf_func & 1) == (pdev->devfn & 1)) {
++
++ vf = (req_id & 0x7F) >> 1;
++ dev_err(pci_dev_to_dev(pdev),
++ "VF %d has caused a PCIe error\n", vf);
++ dev_err(pci_dev_to_dev(pdev),
++ "TLP: dw0: %8.8x\tdw1: %8.8x\tdw2:\n%8.8x\tdw3: %8.8x\n",
++ dw0, dw1, dw2, dw3);
++
++ /* Find the pci device of the offending VF */
++ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
++ E1000_DEV_ID_82576_VF, NULL);
++ while (vfdev) {
++ if (vfdev->devfn == (req_id & 0xFF))
++ break;
++ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
++ E1000_DEV_ID_82576_VF, vfdev);
++ }
++ /*
++ * There's a slim chance the VF could have been hot plugged,
++ * so if it is no longer present we don't need to issue the
++ * VFLR. Just clean up the AER in that case.
++ */
++ if (vfdev) {
++ dev_err(pci_dev_to_dev(pdev),
++ "Issuing VFLR to VF %d\n", vf);
++ pci_write_config_dword(vfdev, 0xA8, 0x00008000);
++ }
++
++ pci_cleanup_aer_uncorrect_error_status(pdev);
++ }
++
++ /*
++ * Even though the error may have occurred on the other port
++ * we still need to increment the vf error reference count for
++ * both ports because the I/O resume function will be called
++ * for both of them.
++ */
++ adapter->vferr_refcount++;
++
++ return PCI_ERS_RESULT_RECOVERED;
++
++skip_bad_vf_detection:
++#endif /* CONFIG_PCI_IOV */
++
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+@@ -7716,22 +9348,21 @@
+ }
+
+ /**
+- * igb_io_slot_reset - called after the pci bus has been reset.
+- * @pdev: Pointer to PCI device
++ * igb_io_slot_reset - called after the pci bus has been reset.
++ * @pdev: Pointer to PCI device
+ *
+- * Restart the card from scratch, as if from a cold-boot. Implementation
+- * resembles the first-half of the igb_resume routine.
+- **/
++ * Restart the card from scratch, as if from a cold-boot. Implementation
++ * resembles the first-half of the igb_resume routine.
++ */
+ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
+ {
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ pci_ers_result_t result;
+- int err;
+
+ if (pci_enable_device_mem(pdev)) {
+- dev_err(&pdev->dev,
++ dev_err(pci_dev_to_dev(pdev),
+ "Cannot re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+@@ -7742,77 +9373,91 @@
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+- igb_reset(adapter);
+- wr32(E1000_WUS, ~0);
++ schedule_work(&adapter->reset_task);
++ E1000_WRITE_REG(hw, E1000_WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+- err = pci_cleanup_aer_uncorrect_error_status(pdev);
+- if (err) {
+- dev_err(&pdev->dev,
+- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
+- err);
+- /* non-fatal, continue */
+- }
++ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ return result;
+ }
+
+ /**
+- * igb_io_resume - called when traffic can start flowing again.
+- * @pdev: Pointer to PCI device
++ * igb_io_resume - called when traffic can start flowing again.
++ * @pdev: Pointer to PCI device
+ *
+- * This callback is called when the error recovery driver tells us that
+- * its OK to resume normal operation. Implementation resembles the
+- * second-half of the igb_resume routine.
++ * This callback is called when the error recovery driver tells us that
++ * its OK to resume normal operation. Implementation resembles the
++ * second-half of the igb_resume routine.
+ */
+ static void igb_io_resume(struct pci_dev *pdev)
+ {
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
++ if (adapter->vferr_refcount) {
++ dev_info(pci_dev_to_dev(pdev), "Resuming after VF err\n");
++ adapter->vferr_refcount--;
++ return;
++ }
++
+ if (netif_running(netdev)) {
+ if (igb_up(adapter)) {
+- dev_err(&pdev->dev, "igb_up failed after reset\n");
++ dev_err(pci_dev_to_dev(pdev), "igb_up failed after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+
+- /* let the f/w know that the h/w is now under the control of the
+- * driver.
++ /* let the f/w know that the h/w is now under the control of the driver.
+ */
+ igb_get_hw_control(adapter);
+ }
+
+-static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
+- u8 qsel)
++#endif /* HAVE_PCI_ERS */
++
++int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue)
+ {
+- u32 rar_low, rar_high;
+ struct e1000_hw *hw = &adapter->hw;
++ int i;
+
+- /* HW expects these in little endian so we reverse the byte order
+- * from network order (big endian) to little endian
+- */
+- rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+- ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+- rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+-
+- /* Indicate to hardware the Address is Valid. */
+- rar_high |= E1000_RAH_AV;
+-
+- if (hw->mac.type == e1000_82575)
+- rar_high |= E1000_RAH_POOL_1 * qsel;
+- else
+- rar_high |= E1000_RAH_POOL_1 << qsel;
++ if (is_zero_ether_addr(addr))
++ return 0;
+
+- wr32(E1000_RAL(index), rar_low);
+- wrfl();
+- wr32(E1000_RAH(index), rar_high);
+- wrfl();
++ for (i = 0; i < hw->mac.rar_entry_count; i++) {
++ if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)
++ continue;
++ adapter->mac_table[i].state = (IGB_MAC_STATE_MODIFIED |
++ IGB_MAC_STATE_IN_USE);
++ memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN);
++ adapter->mac_table[i].queue = queue;
++ igb_sync_mac_table(adapter);
++ return 0;
++ }
++ return -ENOMEM;
+ }
++int igb_del_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue)
++{
++ /* search table for addr, if found, set to 0 and sync */
++ int i;
++ struct e1000_hw *hw = &adapter->hw;
+
++ if (is_zero_ether_addr(addr))
++ return 0;
++ for (i = 0; i < hw->mac.rar_entry_count; i++) {
++ if (!ether_addr_equal(addr, adapter->mac_table[i].addr) &&
++ adapter->mac_table[i].queue == queue) {
++ adapter->mac_table[i].state = IGB_MAC_STATE_MODIFIED;
++ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
++ adapter->mac_table[i].queue = 0;
++ igb_sync_mac_table(adapter);
++ return 0;
++ }
++ }
++ return -ENOMEM;
++}
+ static int igb_set_vf_mac(struct igb_adapter *adapter,
+ int vf, unsigned char *mac_addr)
+ {
+@@ -7829,15 +9474,17 @@
+ return 0;
+ }
+
++#ifdef IFLA_VF_MAX
+ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
++
+ if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
+ return -EINVAL;
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+ dev_info(&adapter->pdev->dev,
+- "Reload the VF driver to make this change effective.");
++ "Reload the VF driver to make this change effective.\n");
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF MAC address has been set, but the PF device is not up.\n");
+@@ -7854,13 +9501,15 @@
+ return 100;
+ case SPEED_1000:
+ return 1000;
++ case SPEED_2500:
++ return 2500;
+ default:
+ return 0;
+ }
+ }
+
+ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
+- int link_speed)
++ int link_speed)
+ {
+ int rf_dec, rf_int;
+ u32 bcnrc_val;
+@@ -7869,23 +9518,23 @@
+ /* Calculate the rate factor values to set */
+ rf_int = link_speed / tx_rate;
+ rf_dec = (link_speed - (rf_int * tx_rate));
+- rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
+- tx_rate;
++ rf_dec = (rf_dec * (1<vf_rate_link_speed == 0) ||
+- (adapter->hw.mac.type != e1000_82576))
++ (adapter->hw.mac.type != e1000_82576))
+ return;
+
+ actual_link_speed = igb_link_mbps(adapter->link_speed);
+@@ -7903,7 +9552,7 @@
+ reset_rate = true;
+ adapter->vf_rate_link_speed = 0;
+ dev_info(&adapter->pdev->dev,
+- "Link speed has been changed. VF Transmit rate is disabled\n");
++ "Link speed has been changed. VF Transmit rate is disabled\n");
+ }
+
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
+@@ -7911,13 +9560,16 @@
+ adapter->vf_data[i].tx_rate = 0;
+
+ igb_set_vf_rate_limit(&adapter->hw, i,
+- adapter->vf_data[i].tx_rate,
+- actual_link_speed);
++ adapter->vf_data[i].tx_rate, actual_link_speed);
+ }
+ }
+
+-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
+- int min_tx_rate, int max_tx_rate)
++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
++static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
++ int max_tx_rate)
++#else
++static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+@@ -7926,105 +9578,137 @@
+ if (hw->mac.type != e1000_82576)
+ return -EOPNOTSUPP;
+
+- if (min_tx_rate)
+- return -EINVAL;
+-
+ actual_link_speed = igb_link_mbps(adapter->link_speed);
+ if ((vf >= adapter->vfs_allocated_count) ||
+- (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
+- (max_tx_rate < 0) ||
+- (max_tx_rate > actual_link_speed))
++ (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) ||
++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
++ (max_tx_rate < 0) || (max_tx_rate > actual_link_speed))
++#else
++ (tx_rate < 0) || (tx_rate > actual_link_speed))
++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+ return -EINVAL;
+
+ adapter->vf_rate_link_speed = actual_link_speed;
++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+ adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
+ igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
++#else
++ adapter->vf_data[vf].tx_rate = (u16)tx_rate;
++ igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+
+ return 0;
+ }
+
+-static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
+- bool setting)
+-{
+- struct igb_adapter *adapter = netdev_priv(netdev);
+- struct e1000_hw *hw = &adapter->hw;
+- u32 reg_val, reg_offset;
+-
+- if (!adapter->vfs_allocated_count)
+- return -EOPNOTSUPP;
+-
+- if (vf >= adapter->vfs_allocated_count)
+- return -EINVAL;
+-
+- reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
+- reg_val = rd32(reg_offset);
+- if (setting)
+- reg_val |= ((1 << vf) |
+- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+- else
+- reg_val &= ~((1 << vf) |
+- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+- wr32(reg_offset, reg_val);
+-
+- adapter->vf_data[vf].spoofchk_enabled = setting;
+- return 0;
+-}
+-
+ static int igb_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+ {
+ struct igb_adapter *adapter = netdev_priv(netdev);
++
+ if (vf >= adapter->vfs_allocated_count)
+ return -EINVAL;
+ ivi->vf = vf;
+ memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+ ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
+ ivi->min_tx_rate = 0;
++#else
++ ivi->tx_rate = adapter->vf_data[vf].tx_rate;
++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
+ ivi->vlan = adapter->vf_data[vf].pf_vlan;
+ ivi->qos = adapter->vf_data[vf].pf_qos;
++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
++#endif
+ return 0;
+ }
+-
++#endif
+ static void igb_vmm_control(struct igb_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
++ int count;
+ u32 reg;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+- case e1000_i210:
+- case e1000_i211:
+- case e1000_i354:
+ default:
+ /* replication is not supported for 82575 */
+ return;
+ case e1000_82576:
+ /* notify HW that the MAC is adding vlan tags */
+- reg = rd32(E1000_DTXCTL);
+- reg |= E1000_DTXCTL_VLAN_ADDED;
+- wr32(E1000_DTXCTL, reg);
++ reg = E1000_READ_REG(hw, E1000_DTXCTL);
++ reg |= (E1000_DTXCTL_VLAN_ADDED |
++ E1000_DTXCTL_SPOOF_INT);
++ E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
+ /* Fall through */
+ case e1000_82580:
+ /* enable replication vlan tag stripping */
+- reg = rd32(E1000_RPLOLR);
++ reg = E1000_READ_REG(hw, E1000_RPLOLR);
+ reg |= E1000_RPLOLR_STRVLAN;
+- wr32(E1000_RPLOLR, reg);
++ E1000_WRITE_REG(hw, E1000_RPLOLR, reg);
+ /* Fall through */
+ case e1000_i350:
++ case e1000_i354:
+ /* none of the above registers are supported by i350 */
+ break;
+ }
+
+- if (adapter->vfs_allocated_count) {
+- igb_vmdq_set_loopback_pf(hw, true);
+- igb_vmdq_set_replication_pf(hw, true);
+- igb_vmdq_set_anti_spoofing_pf(hw, true,
+- adapter->vfs_allocated_count);
+- } else {
+- igb_vmdq_set_loopback_pf(hw, false);
+- igb_vmdq_set_replication_pf(hw, false);
+- }
++ /* Enable Malicious Driver Detection */
++ if ((adapter->vfs_allocated_count) &&
++ (adapter->mdd)) {
++ if (hw->mac.type == e1000_i350)
++ igb_enable_mdd(adapter);
++ }
++
++ /* enable replication and loopback support */
++ count = adapter->vfs_allocated_count || adapter->vmdq_pools;
++ if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE && count)
++ e1000_vmdq_set_loopback_pf(hw, 1);
++ e1000_vmdq_set_anti_spoofing_pf(hw,
++ adapter->vfs_allocated_count || adapter->vmdq_pools,
++ adapter->vfs_allocated_count);
++ e1000_vmdq_set_replication_pf(hw, adapter->vfs_allocated_count ||
++ adapter->vmdq_pools);
++}
++
++static void igb_init_fw(struct igb_adapter *adapter)
++{
++ struct e1000_fw_drv_info fw_cmd;
++ struct e1000_hw *hw = &adapter->hw;
++ int i;
++ u16 mask;
++
++ if (hw->mac.type == e1000_i210)
++ mask = E1000_SWFW_EEP_SM;
++ else
++ mask = E1000_SWFW_PHY0_SM;
++ /* i211 parts do not support this feature */
++ if (hw->mac.type == e1000_i211)
++ hw->mac.arc_subsystem_valid = false;
++
++ if (!hw->mac.ops.acquire_swfw_sync(hw, mask)) {
++ for (i = 0; i <= FW_MAX_RETRIES; i++) {
++ E1000_WRITE_REG(hw, E1000_FWSTS, E1000_FWSTS_FWRI);
++ fw_cmd.hdr.cmd = FW_CMD_DRV_INFO;
++ fw_cmd.hdr.buf_len = FW_CMD_DRV_INFO_LEN;
++ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CMD_RESERVED;
++ fw_cmd.port_num = hw->bus.func;
++ fw_cmd.drv_version = FW_FAMILY_DRV_VER;
++ fw_cmd.hdr.checksum = 0;
++ fw_cmd.hdr.checksum =
++ e1000_calculate_checksum((u8 *)&fw_cmd,
++ (FW_HDR_LEN +
++ fw_cmd.hdr.buf_len));
++ e1000_host_interface_command(hw, (u8 *)&fw_cmd,
++ sizeof(fw_cmd));
++ if (fw_cmd.hdr.cmd_or_resp.ret_status
++ == FW_STATUS_SUCCESS)
++ break;
++ }
++ } else
++ dev_warn(pci_dev_to_dev(adapter->pdev),
++ "Unable to get semaphore, firmware init failed.\n");
++ hw->mac.ops.release_swfw_sync(hw, mask);
+ }
+
+ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
+@@ -8032,34 +9716,40 @@
+ struct e1000_hw *hw = &adapter->hw;
+ u32 dmac_thr;
+ u16 hwm;
++ u32 status;
++
++ if (hw->mac.type == e1000_i211)
++ return;
+
+ if (hw->mac.type > e1000_82580) {
+- if (adapter->flags & IGB_FLAG_DMAC) {
++ if (adapter->dmac != IGB_DMAC_DISABLE) {
+ u32 reg;
+
+- /* force threshold to 0. */
+- wr32(E1000_DMCTXTH, 0);
++ /* force threshold to 0. */
++ E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);
+
+- /* DMA Coalescing high water mark needs to be greater
++ /*
++ * DMA Coalescing high water mark needs to be greater
+ * than the Rx threshold. Set hwm to PBA - max frame
+ * size in 16B units, capping it at PBA - 6KB.
+ */
+ hwm = 64 * pba - adapter->max_frame_size / 16;
+ if (hwm < 64 * (pba - 6))
+ hwm = 64 * (pba - 6);
+- reg = rd32(E1000_FCRTC);
++ reg = E1000_READ_REG(hw, E1000_FCRTC);
+ reg &= ~E1000_FCRTC_RTH_COAL_MASK;
+ reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
+ & E1000_FCRTC_RTH_COAL_MASK);
+- wr32(E1000_FCRTC, reg);
++ E1000_WRITE_REG(hw, E1000_FCRTC, reg);
+
+- /* Set the DMA Coalescing Rx threshold to PBA - 2 * max
++ /*
++ * Set the DMA Coalescing Rx threshold to PBA - 2 * max
+ * frame size, capping it at PBA - 10KB.
+ */
+ dmac_thr = pba - adapter->max_frame_size / 512;
+ if (dmac_thr < pba - 10)
+ dmac_thr = pba - 10;
+- reg = rd32(E1000_DMACR);
++ reg = E1000_READ_REG(hw, E1000_DMACR);
+ reg &= ~E1000_DMACR_DMACTHR_MASK;
+ reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
+ & E1000_DMACR_DMACTHR_MASK);
+@@ -8067,47 +9757,84 @@
+ /* transition to L0x or L1 if available..*/
+ reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
+
+- /* watchdog timer= +-1000 usec in 32usec intervals */
+- reg |= (1000 >> 5);
++ /* Check if status is 2.5Gb backplane connection
++ * before configuration of watchdog timer, which is
++ * in msec values in 12.8usec intervals
++ * watchdog timer= msec values in 32usec intervals
++ * for non 2.5Gb connection
++ */
++ if (hw->mac.type == e1000_i354) {
++ status = E1000_READ_REG(hw, E1000_STATUS);
++ if ((status & E1000_STATUS_2P5_SKU) &&
++ (!(status & E1000_STATUS_2P5_SKU_OVER)))
++ reg |= ((adapter->dmac * 5) >> 6);
++ else
++ reg |= ((adapter->dmac) >> 5);
++ } else {
++ reg |= ((adapter->dmac) >> 5);
++ }
+
+- /* Disable BMC-to-OS Watchdog Enable */
++ /*
++ * Disable BMC-to-OS Watchdog enable
++ * on devices that support OS-to-BMC
++ */
+ if (hw->mac.type != e1000_i354)
+ reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
++ E1000_WRITE_REG(hw, E1000_DMACR, reg);
+
+- wr32(E1000_DMACR, reg);
++ /* no lower threshold to disable coalescing
++ * (smart fifb)-UTRESH=0
++ */
++ E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
+
+- /* no lower threshold to disable
+- * coalescing(smart fifb)-UTRESH=0
++ /* This sets the time to wait before requesting
++ * transition to low power state to number of usecs
++ * needed to receive 1 512 byte frame at gigabit
++ * line rate. On i350 device, time to make transition
++ * to Lx state is delayed by 4 usec with flush disable
++ * bit set to avoid losing mailbox interrupts
+ */
+- wr32(E1000_DMCRTRH, 0);
++ reg = E1000_READ_REG(hw, E1000_DMCTLX);
++ if (hw->mac.type == e1000_i350)
++ reg |= IGB_DMCTLX_DCFLUSH_DIS;
+
+- reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
++ /* in 2.5Gb connection, TTLX unit is 0.4 usec
++ * which is 0x4*2 = 0xA. But delay is still 4 usec
++ */
++ if (hw->mac.type == e1000_i354) {
++ status = E1000_READ_REG(hw, E1000_STATUS);
++ if ((status & E1000_STATUS_2P5_SKU) &&
++ (!(status & E1000_STATUS_2P5_SKU_OVER)))
++ reg |= 0xA;
++ else
++ reg |= 0x4;
++ } else {
++ reg |= 0x4;
++ }
+
+- wr32(E1000_DMCTLX, reg);
++ E1000_WRITE_REG(hw, E1000_DMCTLX, reg);
+
+- /* free space in tx packet buffer to wake from
+- * DMA coal
+- */
+- wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
+- (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
++ /* free space in tx pkt buffer to wake from DMA coal */
++ E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
++ (IGB_TX_BUF_4096 + adapter->max_frame_size))
++ >> 6);
+
+- /* make low power state decision controlled
+- * by DMA coal
+- */
+- reg = rd32(E1000_PCIEMISC);
++ /* low power state decision controlled by DMA coal */
++ reg = E1000_READ_REG(hw, E1000_PCIEMISC);
+ reg &= ~E1000_PCIEMISC_LX_DECISION;
+- wr32(E1000_PCIEMISC, reg);
++ E1000_WRITE_REG(hw, E1000_PCIEMISC, reg);
+ } /* endif adapter->dmac is not disabled */
+ } else if (hw->mac.type == e1000_82580) {
+- u32 reg = rd32(E1000_PCIEMISC);
++ u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC);
+
+- wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
+- wr32(E1000_DMACR, 0);
++ E1000_WRITE_REG(hw, E1000_PCIEMISC,
++ reg & ~E1000_PCIEMISC_LX_DECISION);
++ E1000_WRITE_REG(hw, E1000_DMACR, 0);
+ }
+ }
+
+-/**
+- * igb_read_i2c_byte - Reads 8 bit word over I2C
++#ifdef HAVE_I2C_SUPPORT
++/* igb_read_i2c_byte - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address
+@@ -8115,9 +9842,9 @@
+ *
+ * Performs byte read operation over I2C interface at
+ * a specified device address.
+- **/
++ */
+ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+- u8 dev_addr, u8 *data)
++ u8 dev_addr, u8 *data)
+ {
+ struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+ struct i2c_client *this_client = adapter->i2c_client;
+@@ -8129,7 +9856,8 @@
+
+ swfw_mask = E1000_SWFW_PHY0_SM;
+
+- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
++ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
++ != E1000_SUCCESS)
+ return E1000_ERR_SWFW_SYNC;
+
+ status = i2c_smbus_read_byte_data(this_client, byte_offset);
+@@ -8139,12 +9867,11 @@
+ return E1000_ERR_I2C;
+ else {
+ *data = status;
+- return 0;
++ return E1000_SUCCESS;
+ }
+ }
+
+-/**
+- * igb_write_i2c_byte - Writes 8 bit word over I2C
++/* igb_write_i2c_byte - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+@@ -8152,9 +9879,9 @@
+ *
+ * Performs byte write operation over I2C interface at
+ * a specified device address.
+- **/
++ */
+ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+- u8 dev_addr, u8 data)
++ u8 dev_addr, u8 data)
+ {
+ struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+ struct i2c_client *this_client = adapter->i2c_client;
+@@ -8164,7 +9891,7 @@
+ if (!this_client)
+ return E1000_ERR_I2C;
+
+- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
++ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
+ return E1000_ERR_SWFW_SYNC;
+ status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+@@ -8172,9 +9899,9 @@
+ if (status)
+ return E1000_ERR_I2C;
+ else
+- return 0;
+-
++ return E1000_SUCCESS;
+ }
++#endif /* HAVE_I2C_SUPPORT */
+
+ int igb_reinit_queues(struct igb_adapter *adapter)
+ {
+@@ -8197,4 +9924,5 @@
+
+ return err;
+ }
++
+ /* igb_main.c */
+diff -Nu a/drivers/net/ethernet/intel/igb/igb_param.c b/drivers/net/ethernet/intel/igb/igb_param.c
+--- a/drivers/net/ethernet/intel/igb/igb_param.c 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/igb_param.c 2016-11-14 14:32:08.579567168 +0000
+@@ -0,0 +1,872 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++
++#include
++
++#include "igb.h"
++
++/* This is the only thing that needs to be changed to adjust the
++ * maximum number of ports that the driver can manage.
++ */
++
++#define IGB_MAX_NIC 32
++
++#define OPTION_UNSET -1
++#define OPTION_DISABLED 0
++#define OPTION_ENABLED 1
++#define MAX_NUM_LIST_OPTS 15
++
++/* All parameters are treated the same, as an integer array of values.
++ * This macro just reduces the need to repeat the same declaration code
++ * over and over (plus this helps to avoid typo bugs).
++ */
++
++#define IGB_PARAM_INIT { [0 ... IGB_MAX_NIC] = OPTION_UNSET }
++#ifndef module_param_array
++/* Module Parameters are always initialized to -1, so that the driver
++ * can tell the difference between no user specified value or the
++ * user asking for the default value.
++ * The true default values are loaded in when igb_check_options is called.
++ *
++ * This is a GCC extension to ANSI C.
++ * See the item "Labeled Elements in Initializers" in the section
++ * "Extensions to the C Language Family" of the GCC documentation.
++ */
++
++#define IGB_PARAM(X, desc) \
++ static const int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
++ MODULE_PARM(X, "1-" __MODULE_STRING(IGB_MAX_NIC) "i"); \
++ MODULE_PARM_DESC(X, desc);
++#else
++#define IGB_PARAM(X, desc) \
++ static int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
++ static unsigned int num_##X; \
++ module_param_array_named(X, X, int, &num_##X, 0); \
++ MODULE_PARM_DESC(X, desc);
++#endif
++
++/* Interrupt Throttle Rate (interrupts/sec)
++ *
++ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
++ */
++IGB_PARAM(InterruptThrottleRate,
++ "Maximum interrupts per second, per vector, (max 100000), default 3=adaptive");
++#define DEFAULT_ITR 3
++#define MAX_ITR 100000
++/* #define MIN_ITR 120 */
++#define MIN_ITR 0
++/* IntMode (Interrupt Mode)
++ *
++ * Valid Range: 0 - 2
++ *
++ * Default Value: 2 (MSI-X)
++ */
++IGB_PARAM(IntMode,
++ "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default 2");
++#define MAX_INTMODE IGB_INT_MODE_MSIX
++#define MIN_INTMODE IGB_INT_MODE_LEGACY
++
++IGB_PARAM(Node, "set the starting node to allocate memory on, default -1");
++
++/* LLIPort (Low Latency Interrupt TCP Port)
++ *
++ * Valid Range: 0 - 65535
++ *
++ * Default Value: 0 (disabled)
++ */
++IGB_PARAM(LLIPort,
++ "Low Latency Interrupt TCP Port (0-65535), default 0=off");
++
++#define DEFAULT_LLIPORT 0
++#define MAX_LLIPORT 0xFFFF
++#define MIN_LLIPORT 0
++
++/* LLIPush (Low Latency Interrupt on TCP Push flag)
++ *
++ * Valid Range: 0, 1
++ *
++ * Default Value: 0 (disabled)
++ */
++IGB_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1), default 0=off");
++
++#define DEFAULT_LLIPUSH 0
++#define MAX_LLIPUSH 1
++#define MIN_LLIPUSH 0
++
++/* LLISize (Low Latency Interrupt on Packet Size)
++ *
++ * Valid Range: 0 - 1500
++ *
++ * Default Value: 0 (disabled)
++ */
++IGB_PARAM(LLISize,
++ "Low Latency Interrupt on Packet Size (0-1500), default 0=off");
++
++#define DEFAULT_LLISIZE 0
++#define MAX_LLISIZE 1500
++#define MIN_LLISIZE 0
++
++/* RSS (Enable RSS multiqueue receive)
++ *
++ * Valid Range: 0 - 8
++ *
++ * Default Value: 1
++ */
++IGB_PARAM(RSS,
++ "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1, 0=number of cpus");
++
++#define DEFAULT_RSS 1
++#define MAX_RSS 8
++#define MIN_RSS 0
++
++/* VMDQ (Enable VMDq multiqueue receive)
++ *
++ * Valid Range: 0 - 8
++ *
++ * Default Value: 0
++ */
++IGB_PARAM(VMDQ,
++ "Number of Virtual Machine Device Queues: 0-1 = disable, 2-8 enable, default 0");
++
++#define DEFAULT_VMDQ 0
++#define MAX_VMDQ MAX_RSS
++#define MIN_VMDQ 0
++
++/* max_vfs (Enable SR-IOV VF devices)
++ *
++ * Valid Range: 0 - 7
++ *
++ * Default Value: 0
++ */
++IGB_PARAM(max_vfs,
++ "Number of Virtual Functions: 0 = disable, 1-7 enable, default 0");
++
++#define DEFAULT_SRIOV 0
++#define MAX_SRIOV 7
++#define MIN_SRIOV 0
++
++/* MDD (Enable Malicious Driver Detection)
++ *
++ * Only available when SR-IOV is enabled - max_vfs is greater than 0
++ *
++ * Valid Range: 0, 1
++ *
++ * Default Value: 1
++ */
++IGB_PARAM(MDD,
++ "Malicious Driver Detection (0/1), default 1 = enabled. Only available when max_vfs is greater than 0");
++
++#ifdef DEBUG
++
++/* Disable Hardware Reset on Tx Hang
++ *
++ * Valid Range: 0, 1
++ *
++ * Default Value: 0 (disabled, i.e. h/w will reset)
++ */
++IGB_PARAM(DisableHwReset, "Disable reset of hardware on Tx hang");
++
++/* Dump Transmit and Receive buffers
++ *
++ * Valid Range: 0, 1
++ *
++ * Default Value: 0
++ */
++IGB_PARAM(DumpBuffers, "Dump Tx/Rx buffers on Tx hang or by request");
++
++#endif /* DEBUG */
++
++/* QueuePairs (Enable TX/RX queue pairs for interrupt handling)
++ *
++ * Valid Range: 0 - 1
++ *
++ * Default Value: 1
++ */
++IGB_PARAM(QueuePairs,
++ "Enable Tx/Rx queue pairs for interrupt handling (0,1), default 1=on");
++
++#define DEFAULT_QUEUE_PAIRS 1
++#define MAX_QUEUE_PAIRS 1
++#define MIN_QUEUE_PAIRS 0
++
++/* Enable/disable EEE (a.k.a. IEEE802.3az)
++ *
++ * Valid Range: 0, 1
++ *
++ * Default Value: 1
++ */
++IGB_PARAM(EEE,
++ "Enable/disable on parts that support the feature");
++
++/* Enable/disable DMA Coalescing
++ *
++ * Valid Values: 0(off), 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000,
++ * 9000, 10000(msec), 250(usec), 500(usec)
++ *
++ * Default Value: 0
++ */
++IGB_PARAM(DMAC,
++ "Disable or set latency for DMA Coalescing ((0=off, 1000-10000(msec), 250, 500 (usec))");
++
++#ifndef IGB_NO_LRO
++/* Enable/disable Large Receive Offload
++ *
++ * Valid Values: 0(off), 1(on)
++ *
++ * Default Value: 0
++ */
++IGB_PARAM(LRO, "Large Receive Offload (0,1), default 0=off");
++
++#endif
++struct igb_opt_list {
++ int i;
++ char *str;
++};
++struct igb_option {
++ enum { enable_option, range_option, list_option } type;
++ const char *name;
++ const char *err;
++ int def;
++ union {
++ struct { /* range_option info */
++ int min;
++ int max;
++ } r;
++ struct { /* list_option info */
++ int nr;
++ struct igb_opt_list *p;
++ } l;
++ } arg;
++};
++
++static int igb_validate_option(unsigned int *value,
++ struct igb_option *opt,
++ struct igb_adapter *adapter)
++{
++ if (*value == OPTION_UNSET) {
++ *value = opt->def;
++ return 0;
++ }
++
++ switch (opt->type) {
++ case enable_option:
++ switch (*value) {
++ case OPTION_ENABLED:
++ DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
++ return 0;
++ case OPTION_DISABLED:
++ DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
++ return 0;
++ }
++ break;
++ case range_option:
++ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
++ DPRINTK(PROBE, INFO,
++ "%s set to %d\n", opt->name, *value);
++ return 0;
++ }
++ break;
++ case list_option: {
++ int i;
++ struct igb_opt_list *ent;
++
++ for (i = 0; i < opt->arg.l.nr; i++) {
++ ent = &opt->arg.l.p[i];
++ if (*value == ent->i) {
++ if (ent->str[0] != '\0')
++ DPRINTK(PROBE, INFO, "%s\n", ent->str);
++ return 0;
++ }
++ }
++ }
++ break;
++ default:
++ BUG();
++ }
++
++ DPRINTK(PROBE, INFO, "Invalid %s value specified (%d) %s\n",
++ opt->name, *value, opt->err);
++ *value = opt->def;
++ return -1;
++}
++
++/**
++ * igb_check_options - Range Checking for Command Line Parameters
++ * @adapter: board private structure
++ *
++ * This routine checks all command line parameters for valid user
++ * input. If an invalid value is given, or if no user specified
++ * value exists, a default value is used. The final value is stored
++ * in a variable in the adapter structure.
++ **/
++
++void igb_check_options(struct igb_adapter *adapter)
++{
++ int bd = adapter->bd_number;
++ struct e1000_hw *hw = &adapter->hw;
++
++ if (bd >= IGB_MAX_NIC) {
++ DPRINTK(PROBE, NOTICE,
++ "Warning: no configuration for board #%d\n", bd);
++ DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
++#ifndef module_param_array
++ bd = IGB_MAX_NIC;
++#endif
++ }
++
++ { /* Interrupt Throttling Rate */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "Interrupt Throttling Rate (ints/sec)",
++ .err = "using default of "__MODULE_STRING(DEFAULT_ITR),
++ .def = DEFAULT_ITR,
++ .arg = { .r = { .min = MIN_ITR,
++ .max = MAX_ITR } }
++ };
++
++#ifdef module_param_array
++ if (num_InterruptThrottleRate > bd) {
++#endif
++ unsigned int itr = InterruptThrottleRate[bd];
++
++ switch (itr) {
++ case 0:
++ DPRINTK(PROBE, INFO, "%s turned off\n",
++ opt.name);
++ if (hw->mac.type >= e1000_i350)
++ adapter->dmac = IGB_DMAC_DISABLE;
++ adapter->rx_itr_setting = itr;
++ break;
++ case 1:
++ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
++ opt.name);
++ adapter->rx_itr_setting = itr;
++ break;
++ case 3:
++ DPRINTK(PROBE, INFO,
++ "%s set to dynamic conservative mode\n",
++ opt.name);
++ adapter->rx_itr_setting = itr;
++ break;
++ default:
++ igb_validate_option(&itr, &opt, adapter);
++ /* Save the setting, because the dynamic bits
++ * change itr. In case of invalid user value,
++ * default to conservative mode, else need to
++ * clear the lower two bits because they are
++ * used as control */
++ if (itr == 3) {
++ adapter->rx_itr_setting = itr;
++ } else {
++ adapter->rx_itr_setting = 1000000000
++ / (itr * 256);
++ adapter->rx_itr_setting &= ~3;
++ }
++ break;
++ }
++#ifdef module_param_array
++ } else {
++ adapter->rx_itr_setting = opt.def;
++ }
++#endif
++ adapter->tx_itr_setting = adapter->rx_itr_setting;
++ }
++ { /* Interrupt Mode */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "Interrupt Mode",
++ .err = "defaulting to 2 (MSI-X)",
++ .def = IGB_INT_MODE_MSIX,
++ .arg = { .r = { .min = MIN_INTMODE,
++ .max = MAX_INTMODE } }
++ };
++
++#ifdef module_param_array
++ if (num_IntMode > bd) {
++#endif
++ unsigned int int_mode = IntMode[bd];
++ igb_validate_option(&int_mode, &opt, adapter);
++ adapter->int_mode = int_mode;
++#ifdef module_param_array
++ } else {
++ adapter->int_mode = opt.def;
++ }
++#endif
++ }
++ { /* Low Latency Interrupt TCP Port */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "Low Latency Interrupt TCP Port",
++ .err = "using default of "
++ __MODULE_STRING(DEFAULT_LLIPORT),
++ .def = DEFAULT_LLIPORT,
++ .arg = { .r = { .min = MIN_LLIPORT,
++ .max = MAX_LLIPORT } }
++ };
++
++#ifdef module_param_array
++ if (num_LLIPort > bd) {
++#endif
++ adapter->lli_port = LLIPort[bd];
++ if (adapter->lli_port) {
++ igb_validate_option(&adapter->lli_port, &opt,
++ adapter);
++ } else {
++ DPRINTK(PROBE, INFO, "%s turned off\n",
++ opt.name);
++ }
++#ifdef module_param_array
++ } else {
++ adapter->lli_port = opt.def;
++ }
++#endif
++ }
++ { /* Low Latency Interrupt on Packet Size */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "Low Latency Interrupt on Packet Size",
++ .err = "using default of "
++ __MODULE_STRING(DEFAULT_LLISIZE),
++ .def = DEFAULT_LLISIZE,
++ .arg = { .r = { .min = MIN_LLISIZE,
++ .max = MAX_LLISIZE } }
++ };
++
++#ifdef module_param_array
++ if (num_LLISize > bd) {
++#endif
++ adapter->lli_size = LLISize[bd];
++ if (adapter->lli_size) {
++ igb_validate_option(&adapter->lli_size, &opt,
++ adapter);
++ } else {
++ DPRINTK(PROBE, INFO, "%s turned off\n",
++ opt.name);
++ }
++#ifdef module_param_array
++ } else {
++ adapter->lli_size = opt.def;
++ }
++#endif
++ }
++ { /* Low Latency Interrupt on TCP Push flag */
++ struct igb_option opt = {
++ .type = enable_option,
++ .name = "Low Latency Interrupt on TCP Push flag",
++ .err = "defaulting to Disabled",
++ .def = OPTION_DISABLED
++ };
++
++#ifdef module_param_array
++ if (num_LLIPush > bd) {
++#endif
++ unsigned int lli_push = LLIPush[bd];
++ igb_validate_option(&lli_push, &opt, adapter);
++ adapter->flags |= lli_push ? IGB_FLAG_LLI_PUSH : 0;
++#ifdef module_param_array
++ } else {
++ adapter->flags |= opt.def ? IGB_FLAG_LLI_PUSH : 0;
++ }
++#endif
++ }
++ { /* SRIOV - Enable SR-IOV VF devices */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "max_vfs - SR-IOV VF devices",
++ .err = "using default of "
++ __MODULE_STRING(DEFAULT_SRIOV),
++ .def = DEFAULT_SRIOV,
++ .arg = { .r = { .min = MIN_SRIOV,
++ .max = MAX_SRIOV } }
++ };
++
++#ifdef module_param_array
++ if (num_max_vfs > bd) {
++#endif
++ adapter->vfs_allocated_count = max_vfs[bd];
++ igb_validate_option(&adapter->vfs_allocated_count,
++ &opt, adapter);
++
++#ifdef module_param_array
++ } else {
++ adapter->vfs_allocated_count = opt.def;
++ }
++#endif
++ if (adapter->vfs_allocated_count) {
++ switch (hw->mac.type) {
++ case e1000_82575:
++ case e1000_82580:
++ case e1000_i210:
++ case e1000_i211:
++ case e1000_i354:
++ adapter->vfs_allocated_count = 0;
++ DPRINTK(PROBE, INFO,
++ "SR-IOV option max_vfs not supported.\n");
++ /* Fall through */
++ default:
++ break;
++ }
++ }
++ }
++ { /* VMDQ - Enable VMDq multiqueue receive */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "VMDQ - VMDq multiqueue queue count",
++ .err = "using default of "__MODULE_STRING(DEFAULT_VMDQ),
++ .def = DEFAULT_VMDQ,
++ .arg = { .r = { .min = MIN_VMDQ,
++ .max = (MAX_VMDQ
++ - adapter->vfs_allocated_count)} }
++ };
++ if ((hw->mac.type != e1000_i210) ||
++ (hw->mac.type != e1000_i211)) {
++#ifdef module_param_array
++ if (num_VMDQ > bd) {
++#endif
++ adapter->vmdq_pools = (VMDQ[bd] == 1 ? 0 : VMDQ[bd]);
++ if (adapter->vfs_allocated_count &&
++ !adapter->vmdq_pools) {
++ DPRINTK(PROBE, INFO,
++ "Enabling SR-IOV requires VMDq be set to at least 1\n");
++ adapter->vmdq_pools = 1;
++ }
++ igb_validate_option(&adapter->vmdq_pools, &opt,
++ adapter);
++
++#ifdef module_param_array
++ } else {
++ if (!adapter->vfs_allocated_count)
++ adapter->vmdq_pools = (opt.def == 1 ? 0
++ : opt.def);
++ else
++ adapter->vmdq_pools = 1;
++ }
++#endif
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++ if (hw->mac.type == e1000_82575 && adapter->vmdq_pools) {
++ DPRINTK(PROBE, INFO,
++ "VMDq not supported on this part.\n");
++ adapter->vmdq_pools = 0;
++ }
++#endif
++
++ } else {
++ DPRINTK(PROBE, INFO, "VMDq option is not supported.\n");
++ adapter->vmdq_pools = opt.def;
++ }
++ }
++ { /* RSS - Enable RSS multiqueue receives */
++ struct igb_option opt = {
++ .type = range_option,
++ .name = "RSS - RSS multiqueue receive count",
++ .err = "using default of "__MODULE_STRING(DEFAULT_RSS),
++ .def = DEFAULT_RSS,
++ .arg = { .r = { .min = MIN_RSS,
++ .max = MAX_RSS } }
++ };
++
++ switch (hw->mac.type) {
++ case e1000_82575:
++#ifndef CONFIG_IGB_VMDQ_NETDEV
++ if (!!adapter->vmdq_pools) {
++ if (adapter->vmdq_pools <= 2) {
++ if (adapter->vmdq_pools == 2)
++ opt.arg.r.max = 3;
++ } else {
++ opt.arg.r.max = 1;
++ }
++ } else {
++ opt.arg.r.max = 4;
++ }
++#else
++ opt.arg.r.max = !!adapter->vmdq_pools ? 1 : 4;
++#endif /* CONFIG_IGB_VMDQ_NETDEV */
++ break;
++ case e1000_i210:
++ opt.arg.r.max = 4;
++ break;
++ case e1000_i211:
++ opt.arg.r.max = 2;
++ break;
++ case e1000_82576:
++#ifndef CONFIG_IGB_VMDQ_NETDEV
++ if (!!adapter->vmdq_pools)
++ opt.arg.r.max = 2;
++ break;
++#endif /* CONFIG_IGB_VMDQ_NETDEV */
++ case e1000_82580:
++ case e1000_i350:
++ case e1000_i354:
++ default:
++ if (!!adapter->vmdq_pools)
++ opt.arg.r.max = 1;
++ break;
++ }
++
++ if (adapter->int_mode != IGB_INT_MODE_MSIX) {
++ DPRINTK(PROBE, INFO,
++ "RSS is not supported when in MSI/Legacy Interrupt mode, %s\n",
++ opt.err);
++ opt.arg.r.max = 1;
++ }
++
++#ifdef module_param_array
++ if (num_RSS > bd) {
++#endif
++ adapter->rss_queues = RSS[bd];
++ switch (adapter->rss_queues) {
++ case 1:
++ break;
++ default:
++ igb_validate_option(&adapter->rss_queues, &opt,
++ adapter);
++ if (adapter->rss_queues)
++ break;
++ case 0:
++ adapter->rss_queues = min_t(u32, opt.arg.r.max,
++ num_online_cpus());
++ break;
++ }
++#ifdef module_param_array
++ } else {
++ adapter->rss_queues = opt.def;
++ }
++#endif
++ }
++ { /* QueuePairs - Enable Tx/Rx queue pairs for interrupt handling */
++ struct igb_option opt = {
++ .type = enable_option,
++ .name =
++ "QueuePairs - Tx/Rx queue pairs for interrupt handling",
++ .err = "defaulting to Enabled",
++ .def = OPTION_ENABLED
++ };
++#ifdef module_param_array
++ if (num_QueuePairs > bd) {
++#endif
++ unsigned int qp = QueuePairs[bd];
++ /*
++ * We must enable queue pairs if the number of queues
++ * exceeds the number of available interrupts. We are
++ * limited to 10, or 3 per unallocated vf. On I210 and
++ * I211 devices, we are limited to 5 interrupts.
++ * However, since I211 only supports 2 queues, we do not
++ * need to check and override the user option.
++ */
++ if (qp == OPTION_DISABLED) {
++ if (adapter->rss_queues > 4)
++ qp = OPTION_ENABLED;
++
++ if (adapter->vmdq_pools > 4)
++ qp = OPTION_ENABLED;
++
++ if (adapter->rss_queues > 1 &&
++ (adapter->vmdq_pools > 3 ||
++ adapter->vfs_allocated_count > 6))
++ qp = OPTION_ENABLED;
++
++ if (hw->mac.type == e1000_i210 &&
++ adapter->rss_queues > 2)
++ qp = OPTION_ENABLED;
++
++ if (qp == OPTION_ENABLED)
++ DPRINTK(PROBE, INFO,
++ "Number of queues exceeds available interrupts, %s\n",
++ opt.err);
++ }
++ igb_validate_option(&qp, &opt, adapter);
++ adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0;
++#ifdef module_param_array
++ } else {
++ adapter->flags |= opt.def ? IGB_FLAG_QUEUE_PAIRS : 0;
++ }
++#endif
++ }
++ { /* EEE - Enable EEE for capable adapters */
++
++ if (hw->mac.type >= e1000_i350) {
++ struct igb_option opt = {
++ .type = enable_option,
++ .name = "EEE Support",
++ .err = "defaulting to Enabled",
++ .def = OPTION_ENABLED
++ };
++#ifdef module_param_array
++ if (num_EEE > bd) {
++#endif
++ unsigned int eee = EEE[bd];
++ igb_validate_option(&eee, &opt, adapter);
++ adapter->flags |= eee ? IGB_FLAG_EEE : 0;
++ if (eee)
++ hw->dev_spec._82575.eee_disable = false;
++ else
++ hw->dev_spec._82575.eee_disable = true;
++
++#ifdef module_param_array
++ } else {
++ adapter->flags |= opt.def ? IGB_FLAG_EEE : 0;
++ if (adapter->flags & IGB_FLAG_EEE)
++ hw->dev_spec._82575.eee_disable = false;
++ else
++ hw->dev_spec._82575.eee_disable = true;
++ }
++#endif
++ }
++ }
++ { /* DMAC - Enable DMA Coalescing for capable adapters */
++
++ if (hw->mac.type >= e1000_i350) {
++ struct igb_opt_list list[] = {
++ { IGB_DMAC_DISABLE, "DMAC Disable"},
++ { IGB_DMAC_MIN, "DMAC 250 usec"},
++ { IGB_DMAC_500, "DMAC 500 usec"},
++ { IGB_DMAC_EN_DEFAULT, "DMAC 1000 usec"},
++ { IGB_DMAC_2000, "DMAC 2000 usec"},
++ { IGB_DMAC_3000, "DMAC 3000 usec"},
++ { IGB_DMAC_4000, "DMAC 4000 usec"},
++ { IGB_DMAC_5000, "DMAC 5000 usec"},
++ { IGB_DMAC_6000, "DMAC 6000 usec"},
++ { IGB_DMAC_7000, "DMAC 7000 usec"},
++ { IGB_DMAC_8000, "DMAC 8000 usec"},
++ { IGB_DMAC_9000, "DMAC 9000 usec"},
++ { IGB_DMAC_MAX, "DMAC 10000 usec"}
++ };
++ struct igb_option opt = {
++ .type = list_option,
++ .name = "DMA Coalescing",
++ .err = "using default of "
++ __MODULE_STRING(IGB_DMAC_DISABLE),
++ .def = IGB_DMAC_DISABLE,
++ .arg = { .l = { .nr = 13,
++ .p = list
++ }
++ }
++ };
++#ifdef module_param_array
++ if (num_DMAC > bd) {
++#endif
++ unsigned int dmac = DMAC[bd];
++ if (adapter->rx_itr_setting == IGB_DMAC_DISABLE)
++ dmac = IGB_DMAC_DISABLE;
++ igb_validate_option(&dmac, &opt, adapter);
++ switch (dmac) {
++ case IGB_DMAC_DISABLE:
++ adapter->dmac = dmac;
++ break;
++ case IGB_DMAC_MIN:
++ adapter->dmac = dmac;
++ break;
++ case IGB_DMAC_500:
++ adapter->dmac = dmac;
++ break;
++ case IGB_DMAC_EN_DEFAULT:
++ adapter->dmac = dmac;
++ break;
++ case IGB_DMAC_2000:
++ adapter->dmac = dmac;
++ break;
++ case IGB_DMAC_3000:
++ adapter->dmac = dmac;
++ break;
++ case IGB_DMAC_4000:
++ adapter->dmac = dmac;
++ break;
++ case IGB_DMAC_5000:
++ adapter->dmac = dmac;
++ break;
++ case IGB_DMAC_6000:
++ adapter->dmac = dmac;
++ break;
++ case IGB_DMAC_7000:
++ adapter->dmac = dmac;
++ break;
++ case IGB_DMAC_8000:
++ adapter->dmac = dmac;
++ break;
++ case IGB_DMAC_9000:
++ adapter->dmac = dmac;
++ break;
++ case IGB_DMAC_MAX:
++ adapter->dmac = dmac;
++ break;
++ default:
++ adapter->dmac = opt.def;
++ DPRINTK(PROBE, INFO,
++ "Invalid DMAC setting, resetting DMAC to %d\n",
++ opt.def);
++ }
++#ifdef module_param_array
++ } else
++ adapter->dmac = opt.def;
++#endif
++ }
++ }
++#ifndef IGB_NO_LRO
++ { /* LRO - Enable Large Receive Offload */
++ struct igb_option opt = {
++ .type = enable_option,
++ .name = "LRO - Large Receive Offload",
++ .err = "defaulting to Disabled",
++ .def = OPTION_DISABLED
++ };
++ struct net_device *netdev = adapter->netdev;
++#ifdef module_param_array
++ if (num_LRO > bd) {
++#endif
++ unsigned int lro = LRO[bd];
++ igb_validate_option(&lro, &opt, adapter);
++ netdev->features |= lro ? NETIF_F_LRO : 0;
++#ifdef module_param_array
++ } else if (opt.def == OPTION_ENABLED) {
++ netdev->features |= NETIF_F_LRO;
++ }
++#endif
++ }
++#endif /* IGB_NO_LRO */
++ { /* MDD - Enable Malicious Driver Detection. Only available when
++ SR-IOV is enabled. */
++ struct igb_option opt = {
++ .type = enable_option,
++ .name = "Malicious Driver Detection",
++ .err = "defaulting to 1",
++ .def = OPTION_ENABLED,
++ .arg = { .r = { .min = OPTION_DISABLED,
++ .max = OPTION_ENABLED } }
++ };
++
++#ifdef module_param_array
++ if (num_MDD > bd) {
++#endif
++ adapter->mdd = MDD[bd];
++ igb_validate_option((uint *)&adapter->mdd, &opt,
++ adapter);
++#ifdef module_param_array
++ } else {
++ adapter->mdd = opt.def;
++ }
++#endif
++ }
++}
++
+diff -Nu a/drivers/net/ethernet/intel/igb/igb_procfs.c b/drivers/net/ethernet/intel/igb/igb_procfs.c
+--- a/drivers/net/ethernet/intel/igb/igb_procfs.c 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/igb_procfs.c 2016-11-14 14:32:08.579567168 +0000
+@@ -0,0 +1,356 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include "igb.h"
++#include "e1000_82575.h"
++#include "e1000_hw.h"
++
++#ifdef IGB_PROCFS
++#ifndef IGB_HWMON
++
++#include
++#include
++#include
++#include
++#include
++
++static struct proc_dir_entry *igb_top_dir;
++
++bool igb_thermal_present(struct igb_adapter *adapter)
++{
++ s32 status;
++ struct e1000_hw *hw;
++
++ if (adapter == NULL)
++ return false;
++ hw = &adapter->hw;
++
++ /*
++ * Only set I2C bit-bang mode if an external thermal sensor is
++ * supported on this device.
++ */
++ if (adapter->ets) {
++ status = e1000_set_i2c_bb(hw);
++ if (status != E1000_SUCCESS)
++ return false;
++ }
++
++ status = hw->mac.ops.init_thermal_sensor_thresh(hw);
++ if (status != E1000_SUCCESS)
++ return false;
++
++ return true;
++}
++
++static int igb_macburn(char *page, char **start, off_t off, int count,
++ int *eof, void *data)
++{
++ struct e1000_hw *hw;
++ struct igb_adapter *adapter = (struct igb_adapter *)data;
++ if (adapter == NULL)
++ return snprintf(page, count, "error: no adapter\n");
++
++ hw = &adapter->hw;
++ if (hw == NULL)
++ return snprintf(page, count, "error: no hw data\n");
++
++ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
++ (unsigned int)hw->mac.perm_addr[0],
++ (unsigned int)hw->mac.perm_addr[1],
++ (unsigned int)hw->mac.perm_addr[2],
++ (unsigned int)hw->mac.perm_addr[3],
++ (unsigned int)hw->mac.perm_addr[4],
++ (unsigned int)hw->mac.perm_addr[5]);
++}
++
++static int igb_macadmn(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ struct e1000_hw *hw;
++ struct igb_adapter *adapter = (struct igb_adapter *)data;
++ if (adapter == NULL)
++ return snprintf(page, count, "error: no adapter\n");
++
++ hw = &adapter->hw;
++ if (hw == NULL)
++ return snprintf(page, count, "error: no hw data\n");
++
++ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
++ (unsigned int)hw->mac.addr[0],
++ (unsigned int)hw->mac.addr[1],
++ (unsigned int)hw->mac.addr[2],
++ (unsigned int)hw->mac.addr[3],
++ (unsigned int)hw->mac.addr[4],
++ (unsigned int)hw->mac.addr[5]);
++}
++
++static int igb_numeports(char *page, char **start, off_t off, int count,
++ int *eof, void *data)
++{
++ struct e1000_hw *hw;
++ int ports;
++ struct igb_adapter *adapter = (struct igb_adapter *)data;
++ if (adapter == NULL)
++ return snprintf(page, count, "error: no adapter\n");
++
++ hw = &adapter->hw;
++ if (hw == NULL)
++ return snprintf(page, count, "error: no hw data\n");
++
++ ports = 4;
++
++ return snprintf(page, count, "%d\n", ports);
++}
++
++static int igb_porttype(char *page, char **start, off_t off, int count,
++ int *eof, void *data)
++{
++ struct igb_adapter *adapter = (struct igb_adapter *)data;
++ if (adapter == NULL)
++ return snprintf(page, count, "error: no adapter\n");
++
++ return snprintf(page, count, "%d\n",
++ test_bit(__IGB_DOWN, &adapter->state));
++}
++
++static int igb_therm_location(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ struct igb_therm_proc_data *therm_data =
++ (struct igb_therm_proc_data *)data;
++
++ if (therm_data == NULL)
++ return snprintf(page, count, "error: no therm_data\n");
++
++ return snprintf(page, count, "%d\n", therm_data->sensor_data->location);
++}
++
++static int igb_therm_maxopthresh(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ struct igb_therm_proc_data *therm_data =
++ (struct igb_therm_proc_data *)data;
++
++ if (therm_data == NULL)
++ return snprintf(page, count, "error: no therm_data\n");
++
++ return snprintf(page, count, "%d\n",
++ therm_data->sensor_data->max_op_thresh);
++}
++
++static int igb_therm_cautionthresh(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ struct igb_therm_proc_data *therm_data =
++ (struct igb_therm_proc_data *)data;
++
++ if (therm_data == NULL)
++ return snprintf(page, count, "error: no therm_data\n");
++
++ return snprintf(page, count, "%d\n",
++ therm_data->sensor_data->caution_thresh);
++}
++
++static int igb_therm_temp(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ s32 status;
++ struct igb_therm_proc_data *therm_data =
++ (struct igb_therm_proc_data *)data;
++
++ if (therm_data == NULL)
++ return snprintf(page, count, "error: no therm_data\n");
++
++ status = e1000_get_thermal_sensor_data(therm_data->hw);
++ if (status != E1000_SUCCESS)
++ snprintf(page, count, "error: status %d returned\n", status);
++
++ return snprintf(page, count, "%d\n", therm_data->sensor_data->temp);
++}
++
++struct igb_proc_type {
++ char name[32];
++ int (*read)(char*, char**, off_t, int, int*, void*);
++};
++
++struct igb_proc_type igb_proc_entries[] = {
++ {"numeports", &igb_numeports},
++ {"porttype", &igb_porttype},
++ {"macburn", &igb_macburn},
++ {"macadmn", &igb_macadmn},
++ {"", NULL}
++};
++
++struct igb_proc_type igb_internal_entries[] = {
++ {"location", &igb_therm_location},
++ {"temp", &igb_therm_temp},
++ {"cautionthresh", &igb_therm_cautionthresh},
++ {"maxopthresh", &igb_therm_maxopthresh},
++ {"", NULL}
++};
++
++void igb_del_proc_entries(struct igb_adapter *adapter)
++{
++ int index, i;
++ char buf[16]; /* much larger than the sensor number will ever be */
++
++ if (igb_top_dir == NULL)
++ return;
++
++ for (i = 0; i < E1000_MAX_SENSORS; i++) {
++ if (adapter->therm_dir[i] == NULL)
++ continue;
++
++ for (index = 0; ; index++) {
++ if (igb_internal_entries[index].read == NULL)
++ break;
++
++ remove_proc_entry(igb_internal_entries[index].name,
++ adapter->therm_dir[i]);
++ }
++ snprintf(buf, sizeof(buf), "sensor_%d", i);
++ remove_proc_entry(buf, adapter->info_dir);
++ }
++
++ if (adapter->info_dir != NULL) {
++ for (index = 0; ; index++) {
++ if (igb_proc_entries[index].read == NULL)
++ break;
++ remove_proc_entry(igb_proc_entries[index].name,
++ adapter->info_dir);
++ }
++ remove_proc_entry("info", adapter->eth_dir);
++ }
++
++ if (adapter->eth_dir != NULL)
++ remove_proc_entry(pci_name(adapter->pdev), igb_top_dir);
++}
++
++/* called from igb_main.c */
++void igb_procfs_exit(struct igb_adapter *adapter)
++{
++ igb_del_proc_entries(adapter);
++}
++
++int igb_procfs_topdir_init(void)
++{
++ igb_top_dir = proc_mkdir("driver/igb", NULL);
++ if (igb_top_dir == NULL)
++ return (-ENOMEM);
++
++ return 0;
++}
++
++void igb_procfs_topdir_exit(void)
++{
++ remove_proc_entry("driver/igb", NULL);
++}
++
++/* called from igb_main.c */
++int igb_procfs_init(struct igb_adapter *adapter)
++{
++ int rc = 0;
++ int i;
++ int index;
++ char buf[16]; /* much larger than the sensor number will ever be */
++
++ adapter->eth_dir = NULL;
++ adapter->info_dir = NULL;
++ for (i = 0; i < E1000_MAX_SENSORS; i++)
++ adapter->therm_dir[i] = NULL;
++
++ if (igb_top_dir == NULL) {
++ rc = -ENOMEM;
++ goto fail;
++ }
++
++ adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), igb_top_dir);
++ if (adapter->eth_dir == NULL) {
++ rc = -ENOMEM;
++ goto fail;
++ }
++
++ adapter->info_dir = proc_mkdir("info", adapter->eth_dir);
++ if (adapter->info_dir == NULL) {
++ rc = -ENOMEM;
++ goto fail;
++ }
++ for (index = 0; ; index++) {
++ if (igb_proc_entries[index].read == NULL)
++ break;
++ if (!(create_proc_read_entry(igb_proc_entries[index].name,
++ 0444,
++ adapter->info_dir,
++ igb_proc_entries[index].read,
++ adapter))) {
++
++ rc = -ENOMEM;
++ goto fail;
++ }
++ }
++ if (igb_thermal_present(adapter) == false)
++ goto exit;
++
++ for (i = 0; i < E1000_MAX_SENSORS; i++) {
++ if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
++ continue;
++
++ snprintf(buf, sizeof(buf), "sensor_%d", i);
++ adapter->therm_dir[i] = proc_mkdir(buf, adapter->info_dir);
++ if (adapter->therm_dir[i] == NULL) {
++ rc = -ENOMEM;
++ goto fail;
++ }
++ for (index = 0; ; index++) {
++ if (igb_internal_entries[index].read == NULL)
++ break;
++ /*
++ * therm_data struct contains pointer the read func
++ * will be needing
++ */
++ adapter->therm_data[i].hw = &adapter->hw;
++ adapter->therm_data[i].sensor_data =
++ &adapter->hw.mac.thermal_sensor_data.sensor[i];
++
++ if (!(create_proc_read_entry(
++ igb_internal_entries[index].name,
++ 0444,
++ adapter->therm_dir[i],
++ igb_internal_entries[index].read,
++ &adapter->therm_data[i]))) {
++ rc = -ENOMEM;
++ goto fail;
++ }
++ }
++ }
++ goto exit;
++
++fail:
++ igb_del_proc_entries(adapter);
++exit:
++ return rc;
++}
++
++#endif /* !IGB_HWMON */
++#endif /* IGB_PROCFS */
+diff -Nu a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
+--- a/drivers/net/ethernet/intel/igb/igb_ptp.c 2016-11-13 09:20:24.790171605 +0000
++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c 2016-11-14 14:32:08.579567168 +0000
+@@ -1,31 +1,46 @@
+-/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
+- *
+- * Copyright (C) 2011 Richard Cochran
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, see .
+- */
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++/******************************************************************************
++ Copyright(c) 2011 Richard Cochran for some of the
++ 82576 and 82580 code
++******************************************************************************/
++
++#include "igb.h"
++
++#ifdef HAVE_PTP_1588_CLOCK
+ #include
+ #include
+ #include
+ #include
+-
+-#include "igb.h"
++#include
+
+ #define INCVALUE_MASK 0x7fffffff
+ #define ISGN 0x80000000
+
+-/* The 82580 timesync updates the system timer every 8ns by 8ns,
++/*
++ * The 82580 timesync updates the system timer every 8ns by 8ns,
+ * and this update value cannot be reprogrammed.
+ *
+ * Neither the 82576 nor the 82580 offer registers wide enough to hold
+@@ -74,9 +89,10 @@
+ #define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
+ #define IGB_NBITS_82580 40
+
+-static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
++/*
++ * SYSTIM read access for the 82576
++ */
+
+-/* SYSTIM read access for the 82576 */
+ static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
+ {
+ struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
+@@ -84,8 +100,8 @@
+ u64 val;
+ u32 lo, hi;
+
+- lo = rd32(E1000_SYSTIML);
+- hi = rd32(E1000_SYSTIMH);
++ lo = E1000_READ_REG(hw, E1000_SYSTIML);
++ hi = E1000_READ_REG(hw, E1000_SYSTIMH);
+
+ val = ((u64) hi) << 32;
+ val |= lo;
+@@ -93,21 +109,24 @@
+ return val;
+ }
+
+-/* SYSTIM read access for the 82580 */
++/*
++ * SYSTIM read access for the 82580
++ */
++
+ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
+ {
+ struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
+ struct e1000_hw *hw = &igb->hw;
+- u32 lo, hi;
+ u64 val;
++ u32 lo, hi;
+
+ /* The timestamp latches on lowest register read. For the 82580
+ * the lowest register is SYSTIMR instead of SYSTIML. However we only
+ * need to provide nanosecond resolution, so we just ignore it.
+ */
+- rd32(E1000_SYSTIMR);
+- lo = rd32(E1000_SYSTIML);
+- hi = rd32(E1000_SYSTIMH);
++ E1000_READ_REG(hw, E1000_SYSTIMR);
++ lo = E1000_READ_REG(hw, E1000_SYSTIML);
++ hi = E1000_READ_REG(hw, E1000_SYSTIMH);
+
+ val = ((u64) hi) << 32;
+ val |= lo;
+@@ -115,7 +134,10 @@
+ return val;
+ }
+
+-/* SYSTIM read access for I210/I211 */
++/*
++ * SYSTIM read access for I210/I211
++ */
++
+ static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+@@ -125,9 +147,9 @@
+ * lowest register is SYSTIMR. Since we only need to provide nanosecond
+ * resolution, we can ignore it.
+ */
+- rd32(E1000_SYSTIMR);
+- nsec = rd32(E1000_SYSTIML);
+- sec = rd32(E1000_SYSTIMH);
++ E1000_READ_REG(hw, E1000_SYSTIMR);
++ nsec = E1000_READ_REG(hw, E1000_SYSTIML);
++ sec = E1000_READ_REG(hw, E1000_SYSTIMH);
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+@@ -138,11 +160,12 @@
+ {
+ struct e1000_hw *hw = &adapter->hw;
+
+- /* Writing the SYSTIMR register is not necessary as it only provides
++ /*
++ * Writing the SYSTIMR register is not necessary as it only provides
+ * sub-nanosecond resolution.
+ */
+- wr32(E1000_SYSTIML, ts->tv_nsec);
+- wr32(E1000_SYSTIMH, ts->tv_sec);
++ E1000_WRITE_REG(hw, E1000_SYSTIML, ts->tv_nsec);
++ E1000_WRITE_REG(hw, E1000_SYSTIMH, ts->tv_sec);
+ }
+
+ /**
+@@ -172,8 +195,8 @@
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ case e1000_82580:
+- case e1000_i354:
+ case e1000_i350:
++ case e1000_i354:
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+ ns = timecounter_cyc2time(&adapter->tc, systim);
+@@ -195,7 +218,10 @@
+ }
+ }
+
+-/* PTP clock operations */
++/*
++ * PTP clock operations
++ */
++
+ static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
+ {
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+@@ -220,7 +246,8 @@
+ else
+ incvalue += rate;
+
+- wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK));
++ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576
++ | (incvalue & INCVALUE_82576_MASK));
+
+ return 0;
+ }
+@@ -242,11 +269,24 @@
+ rate <<= 26;
+ rate = div_u64(rate, 1953125);
+
++ /* At 2.5G speeds, the TIMINCA register on I354 updates the clock 2.5x
++ * as quickly. Account for this by dividing the adjustment by 2.5.
++ */
++ if (hw->mac.type == e1000_i354) {
++ u32 status = E1000_READ_REG(hw, E1000_STATUS);
++
++ if ((status & E1000_STATUS_2P5_SKU) &&
++ !(status & E1000_STATUS_2P5_SKU_OVER)) {
++ rate <<= 1;
++ rate = div_u64(rate, 5);
++ }
++ }
++
+ inca = rate & INCVALUE_MASK;
+ if (neg_adj)
+ inca |= ISGN;
+
+- wr32(E1000_TIMINCA, inca);
++ E1000_WRITE_REG(hw, E1000_TIMINCA, inca);
+
+ return 0;
+ }
+@@ -287,14 +327,13 @@
+ return 0;
+ }
+
+-static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
+- struct timespec *ts)
++static int igb_ptp_gettime64_82576(struct ptp_clock_info *ptp,
++ struct timespec64 *ts64)
+ {
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+ u64 ns;
+- u32 remainder;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+@@ -302,28 +341,99 @@
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+- ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+- ts->tv_nsec = remainder;
++ *ts64 = ns_to_timespec64(ns);
+
+ return 0;
+ }
+
+-static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
+- struct timespec *ts)
++static int igb_ptp_gettime64_i210(struct ptp_clock_info *ptp,
++ struct timespec64 *ts64)
++{
++ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
++ ptp_caps);
++ struct timespec ts;
++ unsigned long flags;
++
++ spin_lock_irqsave(&igb->tmreg_lock, flags);
++
++ igb_ptp_read_i210(igb, &ts);
++ *ts64 = timespec_to_timespec64(ts);
++
++ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
++
++ return 0;
++}
++
++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
++static int igb_ptp_settime64_82576(struct ptp_clock_info *ptp,
++ const struct timespec64 *ts64)
++{
++ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
++ ptp_caps);
++ unsigned long flags;
++ u64 ns;
++
++ ns = timespec64_to_ns(ts64);
++
++ spin_lock_irqsave(&igb->tmreg_lock, flags);
++
++ timecounter_init(&igb->tc, &igb->cc, ns);
++
++ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
++
++ return 0;
++}
++
++#endif
++static int igb_ptp_settime64_i210(struct ptp_clock_info *ptp,
++ const struct timespec64 *ts64)
+ {
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
++ struct timespec ts;
+ unsigned long flags;
+
++ ts = timespec64_to_timespec(*ts64);
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+- igb_ptp_read_i210(igb, ts);
++ igb_ptp_write_i210(igb, &ts);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+ }
+
++#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64
++static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
++ struct timespec *ts)
++{
++ struct timespec64 ts64;
++ int err;
++
++ err = igb_ptp_gettime64_82576(ptp, &ts64);
++ if (err)
++ return err;
++
++ *ts = timespec64_to_timespec(ts64);
++
++ return 0;
++}
++
++static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
++ struct timespec *ts)
++{
++ struct timespec64 ts64;
++ int err;
++
++ err = igb_ptp_gettime64_i210(ptp, &ts64);
++ if (err)
++ return err;
++
++ *ts = timespec64_to_timespec(ts64);
++
++ return 0;
++}
++
+ static int igb_ptp_settime_82576(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+ {
+@@ -360,8 +470,9 @@
+ return 0;
+ }
+
+-static int igb_ptp_feature_enable(struct ptp_clock_info *ptp,
+- struct ptp_clock_request *rq, int on)
++#endif
++static int igb_ptp_enable(struct ptp_clock_info *ptp,
++ struct ptp_clock_request *rq, int on)
+ {
+ return -EOPNOTSUPP;
+ }
+@@ -372,8 +483,8 @@
+ *
+ * This work function polls the TSYNCTXCTL valid bit to determine when a
+ * timestamp has been taken for the current stored skb.
+- **/
+-static void igb_ptp_tx_work(struct work_struct *work)
++ */
++void igb_ptp_tx_work(struct work_struct *work)
+ {
+ struct igb_adapter *adapter = container_of(work, struct igb_adapter,
+ ptp_tx_work);
+@@ -393,7 +504,7 @@
+ return;
+ }
+
+- tsynctxctl = rd32(E1000_TSYNCTXCTL);
++ tsynctxctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ if (tsynctxctl & E1000_TSYNCTXCTL_VALID)
+ igb_ptp_tx_hwtstamp(adapter);
+ else
+@@ -401,15 +512,16 @@
+ schedule_work(&adapter->ptp_tx_work);
+ }
+
+-static void igb_ptp_overflow_check(struct work_struct *work)
++static void igb_ptp_overflow_check_82576(struct work_struct *work)
+ {
+ struct igb_adapter *igb =
+ container_of(work, struct igb_adapter, ptp_overflow_work.work);
+- struct timespec ts;
++ struct timespec64 ts64;
+
+- igb->ptp_caps.gettime(&igb->ptp_caps, &ts);
++ igb_ptp_gettime64_82576(&igb->ptp_caps, &ts64);
+
+- pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
++ pr_debug("igb overflow check at %lld.%09lu\n",
++ (long long)ts64.tv_sec, ts64.tv_nsec);
+
+ schedule_delayed_work(&igb->ptp_overflow_work,
+ IGB_SYSTIM_OVERFLOW_PERIOD);
+@@ -423,11 +535,11 @@
+ * dropped an Rx packet that was timestamped when the ring is full. The
+ * particular error is rare but leaves the device in a state unable to timestamp
+ * any future packets.
+- **/
++ */
+ void igb_ptp_rx_hang(struct igb_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+- u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);
++ u32 tsyncrxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ unsigned long rx_event;
+
+ if (hw->mac.type != e1000_82576)
+@@ -448,7 +560,7 @@
+
+ /* Only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+- rd32(E1000_RXSTMPH);
++ E1000_READ_REG(hw, E1000_RXSTMPH);
+ adapter->last_rx_ptp_check = jiffies;
+ adapter->rx_hwtstamp_cleared++;
+ dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n");
+@@ -462,15 +574,15 @@
+ * If we were asked to do hardware stamping and such a time stamp is
+ * available, then it must have been for this skb here because we only
+ * allow only one such packet into the queue.
+- **/
+-static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
++ */
++void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 regval;
+
+- regval = rd32(E1000_TXSTMPL);
+- regval |= (u64)rd32(E1000_TXSTMPH) << 32;
++ regval = E1000_READ_REG(hw, E1000_TXSTMPL);
++ regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32;
+
+ igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
+@@ -488,14 +600,15 @@
+ * This function is meant to retrieve a timestamp from the first buffer of an
+ * incoming frame. The value is stored in little endian format starting on
+ * byte 8.
+- **/
++ */
+ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+ unsigned char *va,
+ struct sk_buff *skb)
+ {
+ __le64 *regval = (__le64 *)va;
+
+- /* The timestamp is recorded in little endian format.
++ /*
++ * The timestamp is recorded in little endian format.
+ * DWORD: 0 1 2 3
+ * Field: Reserved Reserved SYSTIML SYSTIMH
+ */
+@@ -510,7 +623,7 @@
+ *
+ * This function is meant to retrieve a timestamp from the internal registers
+ * of the adapter and store it in the skb.
+- **/
++ */
+ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
+ struct sk_buff *skb)
+ {
+@@ -518,7 +631,8 @@
+ struct e1000_hw *hw = &adapter->hw;
+ u64 regval;
+
+- /* If this bit is set, then the RX registers contain the time stamp. No
++ /*
++ * If this bit is set, then the RX registers contain the time stamp. No
+ * other packet will be time stamped until we read these registers, so
+ * read the registers to make them available again. Because only one
+ * packet can be time stamped at a time, we know that the register
+@@ -528,11 +642,11 @@
+ * If nothing went wrong, then it should have a shared tx_flags that we
+ * can turn into a skb_shared_hwtstamps.
+ */
+- if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
++ if (!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
+
+- regval = rd32(E1000_RXSTMPL);
+- regval |= (u64)rd32(E1000_RXSTMPH) << 32;
++ regval = E1000_READ_REG(hw, E1000_RXSTMPL);
++ regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32;
+
+ igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+
+@@ -576,6 +690,7 @@
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
++ *
+ */
+ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
+ struct hwtstamp_config *config)
+@@ -631,7 +746,8 @@
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+- /* 82576 cannot timestamp all packets, which it needs to do to
++ /*
++ * 82576 cannot timestamp all packets, which it needs to do to
+ * support both V1 Sync and Delay_Req messages
+ */
+ if (hw->mac.type != e1000_82576) {
+@@ -651,9 +767,10 @@
+ return 0;
+ }
+
+- /* Per-packet timestamping only works if all packets are
++ /*
++ * Per-packet timestamping only works if all packets are
+ * timestamped, so enable timestamping in all packets as
+- * long as one Rx filter was configured.
++ * long as one rx filter was configured.
+ */
+ if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
+ tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+@@ -664,63 +781,63 @@
+
+ if ((hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) {
+- regval = rd32(E1000_RXPBS);
++ regval = E1000_READ_REG(hw, E1000_RXPBS);
+ regval |= E1000_RXPBS_CFG_TS_EN;
+- wr32(E1000_RXPBS, regval);
++ E1000_WRITE_REG(hw, E1000_RXPBS, regval);
+ }
+ }
+
+ /* enable/disable TX */
+- regval = rd32(E1000_TSYNCTXCTL);
++ regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ regval &= ~E1000_TSYNCTXCTL_ENABLED;
+ regval |= tsync_tx_ctl;
+- wr32(E1000_TSYNCTXCTL, regval);
++ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval);
+
+ /* enable/disable RX */
+- regval = rd32(E1000_TSYNCRXCTL);
++ regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+- wr32(E1000_TSYNCRXCTL, regval);
++ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval);
+
+ /* define which PTP packets are time stamped */
+- wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
++ E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg);
+
+ /* define ethertype filter for timestamped packets */
+ if (is_l2)
+- wr32(E1000_ETQF(3),
++ E1000_WRITE_REG(hw, E1000_ETQF(3),
+ (E1000_ETQF_FILTER_ENABLE | /* enable filter */
+ E1000_ETQF_1588 | /* enable timestamping */
+ ETH_P_1588)); /* 1588 eth protocol type */
+ else
+- wr32(E1000_ETQF(3), 0);
++ E1000_WRITE_REG(hw, E1000_ETQF(3), 0);
+
+ /* L4 Queue Filter[3]: filter by destination port and protocol */
+ if (is_l4) {
+ u32 ftqf = (IPPROTO_UDP /* UDP */
+- | E1000_FTQF_VF_BP /* VF not compared */
+- | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
+- | E1000_FTQF_MASK); /* mask all inputs */
++ | E1000_FTQF_VF_BP /* VF not compared */
++ | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamp */
++ | E1000_FTQF_MASK); /* mask all inputs */
+ ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
+
+- wr32(E1000_IMIR(3), htons(PTP_EV_PORT));
+- wr32(E1000_IMIREXT(3),
++ E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_EV_PORT));
++ E1000_WRITE_REG(hw, E1000_IMIREXT(3),
+ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
+ if (hw->mac.type == e1000_82576) {
+ /* enable source port check */
+- wr32(E1000_SPQF(3), htons(PTP_EV_PORT));
++ E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_EV_PORT));
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+ }
+- wr32(E1000_FTQF(3), ftqf);
++ E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf);
+ } else {
+- wr32(E1000_FTQF(3), E1000_FTQF_MASK);
++ E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK);
+ }
+- wrfl();
++ E1000_WRITE_FLUSH(hw);
+
+ /* clear TX/RX time stamp registers, just to be sure */
+- regval = rd32(E1000_TXSTMPL);
+- regval = rd32(E1000_TXSTMPH);
+- regval = rd32(E1000_RXSTMPL);
+- regval = rd32(E1000_RXSTMPH);
++ regval = E1000_READ_REG(hw, E1000_TXSTMPL);
++ regval = E1000_READ_REG(hw, E1000_TXSTMPH);
++ regval = E1000_READ_REG(hw, E1000_RXSTMPL);
++ regval = E1000_READ_REG(hw, E1000_RXSTMPH);
+
+ return 0;
+ }
+@@ -766,19 +883,25 @@
+ adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
+ adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
++ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_82576;
++ adapter->ptp_caps.settime64 = igb_ptp_settime64_82576;
++#else
+ adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
+ adapter->ptp_caps.settime = igb_ptp_settime_82576;
+- adapter->ptp_caps.enable = igb_ptp_feature_enable;
++#endif
++ adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->cc.read = igb_ptp_read_82576;
+ adapter->cc.mask = CLOCKSOURCE_MASK(64);
+ adapter->cc.mult = 1;
+ adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
+ /* Dial the nominal frequency. */
+- wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
++ E1000_WRITE_REG(hw, E1000_TIMINCA,
++ INCPERIOD_82576 | INCVALUE_82576);
+ break;
+ case e1000_82580:
+- case e1000_i354:
+ case e1000_i350:
++ case e1000_i354:
+ snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ adapter->ptp_caps.owner = THIS_MODULE;
+ adapter->ptp_caps.max_adj = 62499999;
+@@ -786,15 +909,20 @@
+ adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
+ adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
++ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_82576;
++ adapter->ptp_caps.settime64 = igb_ptp_settime64_82576;
++#else
+ adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
+ adapter->ptp_caps.settime = igb_ptp_settime_82576;
+- adapter->ptp_caps.enable = igb_ptp_feature_enable;
++#endif
++ adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->cc.read = igb_ptp_read_82580;
+ adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
+ adapter->cc.mult = 1;
+ adapter->cc.shift = 0;
+ /* Enable the timer functions by clearing bit 31. */
+- wr32(E1000_TSAUXC, 0x0);
++ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
+ break;
+ case e1000_i210:
+ case e1000_i211:
+@@ -805,33 +933,38 @@
+ adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
+ adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
++ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_i210;
++ adapter->ptp_caps.settime64 = igb_ptp_settime64_i210;
++#else
+ adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
+ adapter->ptp_caps.settime = igb_ptp_settime_i210;
+- adapter->ptp_caps.enable = igb_ptp_feature_enable;
++#endif
++ adapter->ptp_caps.enable = igb_ptp_enable;
+ /* Enable the timer functions by clearing bit 31. */
+- wr32(E1000_TSAUXC, 0x0);
++ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
+ break;
+ default:
+ adapter->ptp_clock = NULL;
+ return;
+ }
+
+- wrfl();
++ E1000_WRITE_FLUSH(hw);
+
+ spin_lock_init(&adapter->tmreg_lock);
+ INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+
+ /* Initialize the clock and overflow work for devices that need it. */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
+- struct timespec ts = ktime_to_timespec(ktime_get_real());
++ struct timespec64 ts = ktime_to_timespec64(ktime_get_real());
+
+- igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
++ igb_ptp_settime64_i210(&adapter->ptp_caps, &ts);
+ } else {
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+
+ INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+- igb_ptp_overflow_check);
++ igb_ptp_overflow_check_82576);
+
+ schedule_delayed_work(&adapter->ptp_overflow_work,
+ IGB_SYSTIM_OVERFLOW_PERIOD);
+@@ -839,8 +972,8 @@
+
+ /* Initialize the time sync interrupts for devices that support it. */
+ if (hw->mac.type >= e1000_82580) {
+- wr32(E1000_TSIM, TSYNC_INTERRUPTS);
+- wr32(E1000_IMS, E1000_IMS_TS);
++ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS);
++ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
+ }
+
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+@@ -869,8 +1002,8 @@
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ case e1000_82580:
+- case e1000_i354:
+ case e1000_i350:
++ case e1000_i354:
+ cancel_delayed_work_sync(&adapter->ptp_overflow_work);
+ break;
+ case e1000_i210:
+@@ -915,17 +1048,18 @@
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ /* Dial the nominal frequency. */
+- wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
++ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 |
++ INCVALUE_82576);
+ break;
+ case e1000_82580:
+- case e1000_i354:
+ case e1000_i350:
++ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ /* Enable the timer functions and interrupts. */
+- wr32(E1000_TSAUXC, 0x0);
+- wr32(E1000_TSIM, TSYNC_INTERRUPTS);
+- wr32(E1000_IMS, E1000_IMS_TS);
++ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
++ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS);
++ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
+ break;
+ default:
+ /* No work to do. */
+@@ -934,11 +1068,12 @@
+
+ /* Re-initialize the timer. */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
+- struct timespec ts = ktime_to_timespec(ktime_get_real());
++ struct timespec64 ts64 = ktime_to_timespec64(ktime_get_real());
+
+- igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
++ igb_ptp_settime64_i210(&adapter->ptp_caps, &ts64);
+ } else {
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+ }
+ }
++#endif /* HAVE_PTP_1588_CLOCK */
+diff -Nu a/drivers/net/ethernet/intel/igb/igb_regtest.h b/drivers/net/ethernet/intel/igb/igb_regtest.h
+--- a/drivers/net/ethernet/intel/igb/igb_regtest.h 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/igb_regtest.h 2016-11-14 14:32:08.579567168 +0000
+@@ -0,0 +1,256 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++/* ethtool register test data */
++struct igb_reg_test {
++ u16 reg;
++ u16 reg_offset;
++ u16 array_len;
++ u16 test_type;
++ u32 mask;
++ u32 write;
++};
++
++/* In the hardware, registers are laid out either singly, in arrays
++ * spaced 0x100 bytes apart, or in contiguous tables. We assume
++ * most tests take place on arrays or single registers (handled
++ * as a single-element array) and special-case the tables.
++ * Table tests are always pattern tests.
++ *
++ * We also make provision for some required setup steps by specifying
++ * registers to be written without any read-back testing.
++ */
++
++#define PATTERN_TEST 1
++#define SET_READ_TEST 2
++#define WRITE_NO_TEST 3
++#define TABLE32_TEST 4
++#define TABLE64_TEST_LO 5
++#define TABLE64_TEST_HI 6
++
++/* i210 reg test */
++static struct igb_reg_test reg_test_i210[] = {
++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
++ /* RDH is read-only for i210, only test RDT. */
++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 },
++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
++ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
++ { E1000_RA, 0, 16, TABLE64_TEST_LO,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RA, 0, 16, TABLE64_TEST_HI,
++ 0x900FFFFF, 0xFFFFFFFF },
++ { E1000_MTA, 0, 128, TABLE32_TEST,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { 0, 0, 0, 0 }
++};
++
++/* i350 reg test */
++static struct igb_reg_test reg_test_i350[] = {
++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
++ /* VET is readonly on i350 */
++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
++ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
++ /* RDH is read-only for i350, only test RDT. */
++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
++ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
++ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
++ { E1000_RA, 0, 16, TABLE64_TEST_LO,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RA, 0, 16, TABLE64_TEST_HI,
++ 0xC3FFFFFF, 0xFFFFFFFF },
++ { E1000_RA2, 0, 16, TABLE64_TEST_LO,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RA2, 0, 16, TABLE64_TEST_HI,
++ 0xC3FFFFFF, 0xFFFFFFFF },
++ { E1000_MTA, 0, 128, TABLE32_TEST,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { 0, 0, 0, 0 }
++};
++
++/* 82580 reg test */
++static struct igb_reg_test reg_test_82580[] = {
++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
++ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
++ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
++ /* RDH is read-only for 82580, only test RDT. */
++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
++ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
++ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
++ { E1000_RA, 0, 16, TABLE64_TEST_LO,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RA, 0, 16, TABLE64_TEST_HI,
++ 0x83FFFFFF, 0xFFFFFFFF },
++ { E1000_RA2, 0, 8, TABLE64_TEST_LO,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RA2, 0, 8, TABLE64_TEST_HI,
++ 0x83FFFFFF, 0xFFFFFFFF },
++ { E1000_MTA, 0, 128, TABLE32_TEST,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { 0, 0, 0, 0 }
++};
++
++/* 82576 reg test */
++static struct igb_reg_test reg_test_82576[] = {
++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
++ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
++ { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
++ /* Enable all queues before testing. */
++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
++ E1000_RXDCTL_QUEUE_ENABLE },
++ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
++ E1000_RXDCTL_QUEUE_ENABLE },
++ /* RDH is read-only for 82576, only test RDT. */
++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
++ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
++ { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
++ { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
++ { E1000_RA, 0, 16, TABLE64_TEST_LO,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RA, 0, 16, TABLE64_TEST_HI,
++ 0x83FFFFFF, 0xFFFFFFFF },
++ { E1000_RA2, 0, 8, TABLE64_TEST_LO,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RA2, 0, 8, TABLE64_TEST_HI,
++ 0x83FFFFFF, 0xFFFFFFFF },
++ { E1000_MTA, 0, 128, TABLE32_TEST,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { 0, 0, 0, 0 }
++};
++
++/* 82575 register test */
++static struct igb_reg_test reg_test_82575[] = {
++ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
++ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
++ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80,
++ 0xFFFFFFFF },
++ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF,
++ 0xFFFFFFFF },
++ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
++ /* Enable all four RX queues before testing. */
++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
++ E1000_RXDCTL_QUEUE_ENABLE },
++ /* RDH is read-only for 82575, only test RDT. */
++ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
++ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
++ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
++ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
++ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80,
++ 0xFFFFFFFF },
++ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF,
++ 0xFFFFFFFF },
++ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80,
++ 0x000FFFFF },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
++ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
++ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
++ { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
++ { E1000_RA, 0, 16, TABLE64_TEST_LO,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { E1000_RA, 0, 16, TABLE64_TEST_HI,
++ 0x800FFFFF, 0xFFFFFFFF },
++ { E1000_MTA, 0, 128, TABLE32_TEST,
++ 0xFFFFFFFF, 0xFFFFFFFF },
++ { 0, 0, 0, 0 }
++};
++
++
+diff -Nu a/drivers/net/ethernet/intel/igb/igb_vmdq.c b/drivers/net/ethernet/intel/igb/igb_vmdq.c
+--- a/drivers/net/ethernet/intel/igb/igb_vmdq.c 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/igb_vmdq.c 2016-11-14 14:32:08.579567168 +0000
+@@ -0,0 +1,433 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++
++#include
++
++#include "igb.h"
++#include "igb_vmdq.h"
++#include
++
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++int igb_vmdq_open(struct net_device *dev)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
++ struct igb_adapter *adapter = vadapter->real_adapter;
++ struct net_device *main_netdev = adapter->netdev;
++ int hw_queue = vadapter->rx_ring->queue_index +
++ adapter->vfs_allocated_count;
++
++ if (test_bit(__IGB_DOWN, &adapter->state)) {
++ DPRINTK(DRV, WARNING,
++ "Open %s before opening this device.\n",
++ main_netdev->name);
++ return -EAGAIN;
++ }
++ netif_carrier_off(dev);
++ vadapter->tx_ring->vmdq_netdev = dev;
++ vadapter->rx_ring->vmdq_netdev = dev;
++ if (is_valid_ether_addr(dev->dev_addr)) {
++ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
++ igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
++ }
++ netif_carrier_on(dev);
++ return 0;
++}
++
++int igb_vmdq_close(struct net_device *dev)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
++ struct igb_adapter *adapter = vadapter->real_adapter;
++ int hw_queue = vadapter->rx_ring->queue_index +
++ adapter->vfs_allocated_count;
++
++ netif_carrier_off(dev);
++ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
++
++ vadapter->tx_ring->vmdq_netdev = NULL;
++ vadapter->rx_ring->vmdq_netdev = NULL;
++ return 0;
++}
++
++netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
++
++ return igb_xmit_frame_ring(skb, vadapter->tx_ring);
++}
++
++struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
++ struct igb_adapter *adapter = vadapter->real_adapter;
++ struct e1000_hw *hw = &adapter->hw;
++ int hw_queue = vadapter->rx_ring->queue_index +
++ adapter->vfs_allocated_count;
++
++ vadapter->net_stats.rx_packets +=
++ E1000_READ_REG(hw, E1000_PFVFGPRC(hw_queue));
++ E1000_WRITE_REG(hw, E1000_PFVFGPRC(hw_queue), 0);
++ vadapter->net_stats.tx_packets +=
++ E1000_READ_REG(hw, E1000_PFVFGPTC(hw_queue));
++ E1000_WRITE_REG(hw, E1000_PFVFGPTC(hw_queue), 0);
++ vadapter->net_stats.rx_bytes +=
++ E1000_READ_REG(hw, E1000_PFVFGORC(hw_queue));
++ E1000_WRITE_REG(hw, E1000_PFVFGORC(hw_queue), 0);
++ vadapter->net_stats.tx_bytes +=
++ E1000_READ_REG(hw, E1000_PFVFGOTC(hw_queue));
++ E1000_WRITE_REG(hw, E1000_PFVFGOTC(hw_queue), 0);
++ vadapter->net_stats.multicast +=
++ E1000_READ_REG(hw, E1000_PFVFMPRC(hw_queue));
++ E1000_WRITE_REG(hw, E1000_PFVFMPRC(hw_queue), 0);
++ /* only return the current stats */
++ return &vadapter->net_stats;
++}
++
++/**
++ * igb_write_vm_addr_list - write unicast addresses to RAR table
++ * @netdev: network interface device structure
++ *
++ * Writes unicast address list to the RAR table.
++ * Returns: -ENOMEM on failure/insufficient address space
++ * 0 on no addresses written
++ * X on writing X addresses to the RAR table
++ **/
++static int igb_write_vm_addr_list(struct net_device *netdev)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
++ struct igb_adapter *adapter = vadapter->real_adapter;
++ int count = 0;
++ int hw_queue = vadapter->rx_ring->queue_index +
++ adapter->vfs_allocated_count;
++
++ /* return ENOMEM indicating insufficient memory for addresses */
++ if (netdev_uc_count(netdev) > igb_available_rars(adapter))
++ return -ENOMEM;
++
++ if (!netdev_uc_empty(netdev)) {
++#ifdef NETDEV_HW_ADDR_T_UNICAST
++ struct netdev_hw_addr *ha;
++#else
++ struct dev_mc_list *ha;
++#endif
++ netdev_for_each_uc_addr(ha, netdev) {
++#ifdef NETDEV_HW_ADDR_T_UNICAST
++ igb_del_mac_filter(adapter, ha->addr, hw_queue);
++ igb_add_mac_filter(adapter, ha->addr, hw_queue);
++#else
++ igb_del_mac_filter(adapter, ha->da_addr, hw_queue);
++ igb_add_mac_filter(adapter, ha->da_addr, hw_queue);
++#endif
++ count++;
++ }
++ }
++ return count;
++}
++
++
++#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous mode */
++void igb_vmdq_set_rx_mode(struct net_device *dev)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
++ struct igb_adapter *adapter = vadapter->real_adapter;
++ struct e1000_hw *hw = &adapter->hw;
++ u32 vmolr, rctl;
++ int hw_queue = vadapter->rx_ring->queue_index +
++ adapter->vfs_allocated_count;
++
++ /* Check for Promiscuous and All Multicast modes */
++ vmolr = E1000_READ_REG(hw, E1000_VMOLR(hw_queue));
++
++ /* clear the affected bits */
++ vmolr &= ~(E1000_VMOLR_UPE | E1000_VMOLR_MPME |
++ E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE);
++
++ if (dev->flags & IFF_PROMISC) {
++ vmolr |= E1000_VMOLR_UPE;
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
++ rctl |= E1000_RCTL_UPE;
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
++ } else {
++ rctl = E1000_READ_REG(hw, E1000_RCTL);
++ rctl &= ~E1000_RCTL_UPE;
++ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
++ if (dev->flags & IFF_ALLMULTI) {
++ vmolr |= E1000_VMOLR_MPME;
++ } else {
++ /*
++ * Write addresses to the MTA, if the attempt fails
++ * then we should just turn on promiscous mode so
++ * that we can at least receive multicast traffic
++ */
++ if (igb_write_mc_addr_list(adapter->netdev) != 0)
++ vmolr |= E1000_VMOLR_ROMPE;
++ }
++#ifdef HAVE_SET_RX_MODE
++ /*
++ * Write addresses to available RAR registers, if there is not
++ * sufficient space to store all the addresses then enable
++ * unicast promiscous mode
++ */
++ if (igb_write_vm_addr_list(dev) < 0)
++ vmolr |= E1000_VMOLR_UPE;
++#endif
++ }
++ E1000_WRITE_REG(hw, E1000_VMOLR(hw_queue), vmolr);
++
++ return;
++}
++
++int igb_vmdq_set_mac(struct net_device *dev, void *p)
++{
++ struct sockaddr *addr = p;
++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
++ struct igb_adapter *adapter = vadapter->real_adapter;
++ int hw_queue = vadapter->rx_ring->queue_index +
++ adapter->vfs_allocated_count;
++
++ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
++ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
++ return igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
++}
++
++int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
++ struct igb_adapter *adapter = vadapter->real_adapter;
++
++ if (adapter->netdev->mtu < new_mtu) {
++ DPRINTK(PROBE, INFO,
++ "Set MTU on %s to >= %d before changing MTU on %s\n",
++ adapter->netdev->name, new_mtu, dev->name);
++ return -EINVAL;
++ }
++ dev->mtu = new_mtu;
++ return 0;
++}
++
++void igb_vmdq_tx_timeout(struct net_device *dev)
++{
++ return;
++}
++
++void igb_vmdq_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
++ struct igb_adapter *adapter = vadapter->real_adapter;
++ struct e1000_hw *hw = &adapter->hw;
++ int hw_queue = vadapter->rx_ring->queue_index +
++ adapter->vfs_allocated_count;
++
++ vadapter->vlgrp = grp;
++
++ igb_enable_vlan_tags(adapter);
++ E1000_WRITE_REG(hw, E1000_VMVIR(hw_queue), 0);
++
++ return;
++}
++void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
++ struct igb_adapter *adapter = vadapter->real_adapter;
++#ifndef HAVE_NETDEV_VLAN_FEATURES
++ struct net_device *v_netdev;
++#endif
++ int hw_queue = vadapter->rx_ring->queue_index +
++ adapter->vfs_allocated_count;
++
++ /* attempt to add filter to vlvf array */
++ igb_vlvf_set(adapter, vid, TRUE, hw_queue);
++
++#ifndef HAVE_NETDEV_VLAN_FEATURES
++
++ /* Copy feature flags from netdev to the vlan netdev for this vid.
++ * This allows things like TSO to bubble down to our vlan device.
++ */
++ v_netdev = vlan_group_get_device(vadapter->vlgrp, vid);
++ v_netdev->features |= adapter->netdev->features;
++ vlan_group_set_device(vadapter->vlgrp, vid, v_netdev);
++#endif
++
++ return;
++}
++void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
++ struct igb_adapter *adapter = vadapter->real_adapter;
++ int hw_queue = vadapter->rx_ring->queue_index +
++ adapter->vfs_allocated_count;
++
++ vlan_group_set_device(vadapter->vlgrp, vid, NULL);
++ /* remove vlan from VLVF table array */
++ igb_vlvf_set(adapter, vid, FALSE, hw_queue);
++
++
++ return;
++}
++
++static int igb_vmdq_get_settings(struct net_device *netdev,
++ struct ethtool_cmd *ecmd)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
++ struct igb_adapter *adapter = vadapter->real_adapter;
++ struct e1000_hw *hw = &adapter->hw;
++ u32 status;
++
++ if (hw->phy.media_type == e1000_media_type_copper) {
++
++ ecmd->supported = (SUPPORTED_10baseT_Half |
++ SUPPORTED_10baseT_Full |
++ SUPPORTED_100baseT_Half |
++ SUPPORTED_100baseT_Full |
++ SUPPORTED_1000baseT_Full|
++ SUPPORTED_Autoneg |
++ SUPPORTED_TP);
++ ecmd->advertising = ADVERTISED_TP;
++
++ if (hw->mac.autoneg == 1) {
++ ecmd->advertising |= ADVERTISED_Autoneg;
++ /* the e1000 autoneg seems to match ethtool nicely */
++ ecmd->advertising |= hw->phy.autoneg_advertised;
++ }
++
++ ecmd->port = PORT_TP;
++ ecmd->phy_address = hw->phy.addr;
++ } else {
++ ecmd->supported = (SUPPORTED_1000baseT_Full |
++ SUPPORTED_FIBRE |
++ SUPPORTED_Autoneg);
++
++ ecmd->advertising = (ADVERTISED_1000baseT_Full |
++ ADVERTISED_FIBRE |
++ ADVERTISED_Autoneg);
++
++ ecmd->port = PORT_FIBRE;
++ }
++
++ ecmd->transceiver = XCVR_INTERNAL;
++
++ status = E1000_READ_REG(hw, E1000_STATUS);
++
++ if (status & E1000_STATUS_LU) {
++
++ if ((status & E1000_STATUS_SPEED_1000) ||
++ hw->phy.media_type != e1000_media_type_copper)
++ ethtool_cmd_speed_set(ecmd, SPEED_1000);
++ else if (status & E1000_STATUS_SPEED_100)
++ ethtool_cmd_speed_set(ecmd, SPEED_100);
++ else
++ ethtool_cmd_speed_set(ecmd, SPEED_10);
++
++ if ((status & E1000_STATUS_FD) ||
++ hw->phy.media_type != e1000_media_type_copper)
++ ecmd->duplex = DUPLEX_FULL;
++ else
++ ecmd->duplex = DUPLEX_HALF;
++ } else {
++ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
++ ecmd->duplex = -1;
++ }
++
++ ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
++ return 0;
++}
++
++
++static u32 igb_vmdq_get_msglevel(struct net_device *netdev)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
++ struct igb_adapter *adapter = vadapter->real_adapter;
++ return adapter->msg_enable;
++}
++
++static void igb_vmdq_get_drvinfo(struct net_device *netdev,
++ struct ethtool_drvinfo *drvinfo)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
++ struct igb_adapter *adapter = vadapter->real_adapter;
++ struct net_device *main_netdev = adapter->netdev;
++
++ strncpy(drvinfo->driver, igb_driver_name, 32);
++ strncpy(drvinfo->version, igb_driver_version, 32);
++
++ strncpy(drvinfo->fw_version, "N/A", 4);
++ snprintf(drvinfo->bus_info, 32, "%s VMDQ %d", main_netdev->name,
++ vadapter->rx_ring->queue_index);
++ drvinfo->n_stats = 0;
++ drvinfo->testinfo_len = 0;
++ drvinfo->regdump_len = 0;
++}
++
++static void igb_vmdq_get_ringparam(struct net_device *netdev,
++ struct ethtool_ringparam *ring)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
++
++ struct igb_ring *tx_ring = vadapter->tx_ring;
++ struct igb_ring *rx_ring = vadapter->rx_ring;
++
++ ring->rx_max_pending = IGB_MAX_RXD;
++ ring->tx_max_pending = IGB_MAX_TXD;
++ ring->rx_mini_max_pending = 0;
++ ring->rx_jumbo_max_pending = 0;
++ ring->rx_pending = rx_ring->count;
++ ring->tx_pending = tx_ring->count;
++ ring->rx_mini_pending = 0;
++ ring->rx_jumbo_pending = 0;
++}
++static u32 igb_vmdq_get_rx_csum(struct net_device *netdev)
++{
++ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
++ struct igb_adapter *adapter = vadapter->real_adapter;
++
++ return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags);
++}
++
++
++static struct ethtool_ops igb_vmdq_ethtool_ops = {
++ .get_settings = igb_vmdq_get_settings,
++ .get_drvinfo = igb_vmdq_get_drvinfo,
++ .get_link = ethtool_op_get_link,
++ .get_ringparam = igb_vmdq_get_ringparam,
++ .get_rx_csum = igb_vmdq_get_rx_csum,
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = ethtool_op_set_sg,
++ .get_msglevel = igb_vmdq_get_msglevel,
++#ifdef NETIF_F_TSO
++ .get_tso = ethtool_op_get_tso,
++#endif
++#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
++ .get_perm_addr = ethtool_op_get_perm_addr,
++#endif
++};
++
++void igb_vmdq_set_ethtool_ops(struct net_device *netdev)
++{
++ SET_ETHTOOL_OPS(netdev, &igb_vmdq_ethtool_ops);
++}
++
++
++#endif /* CONFIG_IGB_VMDQ_NETDEV */
++
+diff -Nu a/drivers/net/ethernet/intel/igb/igb_vmdq.h b/drivers/net/ethernet/intel/igb/igb_vmdq.h
+--- a/drivers/net/ethernet/intel/igb/igb_vmdq.h 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/igb_vmdq.h 2016-11-14 14:32:08.579567168 +0000
+@@ -0,0 +1,43 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _IGB_VMDQ_H_
++#define _IGB_VMDQ_H_
++
++#ifdef CONFIG_IGB_VMDQ_NETDEV
++int igb_vmdq_open(struct net_device *dev);
++int igb_vmdq_close(struct net_device *dev);
++netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev);
++struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev);
++void igb_vmdq_set_rx_mode(struct net_device *dev);
++int igb_vmdq_set_mac(struct net_device *dev, void *addr);
++int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu);
++void igb_vmdq_tx_timeout(struct net_device *dev);
++void igb_vmdq_vlan_rx_register(struct net_device *dev,
++ struct vlan_group *grp);
++void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
++void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
++void igb_vmdq_set_ethtool_ops(struct net_device *netdev);
++#endif /* CONFIG_IGB_VMDQ_NETDEV */
++#endif /* _IGB_VMDQ_H_ */
+diff -Nu a/drivers/net/ethernet/intel/igb/kcompat.c b/drivers/net/ethernet/intel/igb/kcompat.c
+--- a/drivers/net/ethernet/intel/igb/kcompat.c 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/kcompat.c 2016-11-14 14:32:08.579567168 +0000
+@@ -0,0 +1,2082 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#include "igb.h"
++#include "kcompat.h"
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
++/* From lib/vsprintf.c */
++#include
++
++static int skip_atoi(const char **s)
++{
++ int i=0;
++
++ while (isdigit(**s))
++ i = i*10 + *((*s)++) - '0';
++ return i;
++}
++
++#define _kc_ZEROPAD 1 /* pad with zero */
++#define _kc_SIGN 2 /* unsigned/signed long */
++#define _kc_PLUS 4 /* show plus */
++#define _kc_SPACE 8 /* space if plus */
++#define _kc_LEFT 16 /* left justified */
++#define _kc_SPECIAL 32 /* 0x */
++#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
++
++static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
++{
++ char c,sign,tmp[66];
++ const char *digits;
++ const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
++ const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
++ int i;
++
++ digits = (type & _kc_LARGE) ? large_digits : small_digits;
++ if (type & _kc_LEFT)
++ type &= ~_kc_ZEROPAD;
++ if (base < 2 || base > 36)
++ return 0;
++ c = (type & _kc_ZEROPAD) ? '0' : ' ';
++ sign = 0;
++ if (type & _kc_SIGN) {
++ if (num < 0) {
++ sign = '-';
++ num = -num;
++ size--;
++ } else if (type & _kc_PLUS) {
++ sign = '+';
++ size--;
++ } else if (type & _kc_SPACE) {
++ sign = ' ';
++ size--;
++ }
++ }
++ if (type & _kc_SPECIAL) {
++ if (base == 16)
++ size -= 2;
++ else if (base == 8)
++ size--;
++ }
++ i = 0;
++ if (num == 0)
++ tmp[i++]='0';
++ else while (num != 0)
++ tmp[i++] = digits[do_div(num,base)];
++ if (i > precision)
++ precision = i;
++ size -= precision;
++ if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
++ while(size-->0) {
++ if (buf <= end)
++ *buf = ' ';
++ ++buf;
++ }
++ }
++ if (sign) {
++ if (buf <= end)
++ *buf = sign;
++ ++buf;
++ }
++ if (type & _kc_SPECIAL) {
++ if (base==8) {
++ if (buf <= end)
++ *buf = '0';
++ ++buf;
++ } else if (base==16) {
++ if (buf <= end)
++ *buf = '0';
++ ++buf;
++ if (buf <= end)
++ *buf = digits[33];
++ ++buf;
++ }
++ }
++ if (!(type & _kc_LEFT)) {
++ while (size-- > 0) {
++ if (buf <= end)
++ *buf = c;
++ ++buf;
++ }
++ }
++ while (i < precision--) {
++ if (buf <= end)
++ *buf = '0';
++ ++buf;
++ }
++ while (i-- > 0) {
++ if (buf <= end)
++ *buf = tmp[i];
++ ++buf;
++ }
++ while (size-- > 0) {
++ if (buf <= end)
++ *buf = ' ';
++ ++buf;
++ }
++ return buf;
++}
++
++int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
++{
++ int len;
++ unsigned long long num;
++ int i, base;
++ char *str, *end, c;
++ const char *s;
++
++ int flags; /* flags to number() */
++
++ int field_width; /* width of output field */
++ int precision; /* min. # of digits for integers; max
++ number of chars for from string */
++ int qualifier; /* 'h', 'l', or 'L' for integer fields */
++ /* 'z' support added 23/7/1999 S.H. */
++ /* 'z' changed to 'Z' --davidm 1/25/99 */
++
++ str = buf;
++ end = buf + size - 1;
++
++ if (end < buf - 1) {
++ end = ((void *) -1);
++ size = end - buf + 1;
++ }
++
++ for (; *fmt ; ++fmt) {
++ if (*fmt != '%') {
++ if (str <= end)
++ *str = *fmt;
++ ++str;
++ continue;
++ }
++
++ /* process flags */
++ flags = 0;
++ repeat:
++ ++fmt; /* this also skips first '%' */
++ switch (*fmt) {
++ case '-': flags |= _kc_LEFT; goto repeat;
++ case '+': flags |= _kc_PLUS; goto repeat;
++ case ' ': flags |= _kc_SPACE; goto repeat;
++ case '#': flags |= _kc_SPECIAL; goto repeat;
++ case '0': flags |= _kc_ZEROPAD; goto repeat;
++ }
++
++ /* get field width */
++ field_width = -1;
++ if (isdigit(*fmt))
++ field_width = skip_atoi(&fmt);
++ else if (*fmt == '*') {
++ ++fmt;
++ /* it's the next argument */
++ field_width = va_arg(args, int);
++ if (field_width < 0) {
++ field_width = -field_width;
++ flags |= _kc_LEFT;
++ }
++ }
++
++ /* get the precision */
++ precision = -1;
++ if (*fmt == '.') {
++ ++fmt;
++ if (isdigit(*fmt))
++ precision = skip_atoi(&fmt);
++ else if (*fmt == '*') {
++ ++fmt;
++ /* it's the next argument */
++ precision = va_arg(args, int);
++ }
++ if (precision < 0)
++ precision = 0;
++ }
++
++ /* get the conversion qualifier */
++ qualifier = -1;
++ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
++ qualifier = *fmt;
++ ++fmt;
++ }
++
++ /* default base */
++ base = 10;
++
++ switch (*fmt) {
++ case 'c':
++ if (!(flags & _kc_LEFT)) {
++ while (--field_width > 0) {
++ if (str <= end)
++ *str = ' ';
++ ++str;
++ }
++ }
++ c = (unsigned char) va_arg(args, int);
++ if (str <= end)
++ *str = c;
++ ++str;
++ while (--field_width > 0) {
++ if (str <= end)
++ *str = ' ';
++ ++str;
++ }
++ continue;
++
++ case 's':
++ s = va_arg(args, char *);
++ if (!s)
++ s = "";
++
++ len = strnlen(s, precision);
++
++ if (!(flags & _kc_LEFT)) {
++ while (len < field_width--) {
++ if (str <= end)
++ *str = ' ';
++ ++str;
++ }
++ }
++ for (i = 0; i < len; ++i) {
++ if (str <= end)
++ *str = *s;
++ ++str; ++s;
++ }
++ while (len < field_width--) {
++ if (str <= end)
++ *str = ' ';
++ ++str;
++ }
++ continue;
++
++ case 'p':
++ if (field_width == -1) {
++ field_width = 2*sizeof(void *);
++ flags |= _kc_ZEROPAD;
++ }
++ str = number(str, end,
++ (unsigned long) va_arg(args, void *),
++ 16, field_width, precision, flags);
++ continue;
++
++
++ case 'n':
++ /* FIXME:
++ * What does C99 say about the overflow case here? */
++ if (qualifier == 'l') {
++ long * ip = va_arg(args, long *);
++ *ip = (str - buf);
++ } else if (qualifier == 'Z') {
++ size_t * ip = va_arg(args, size_t *);
++ *ip = (str - buf);
++ } else {
++ int * ip = va_arg(args, int *);
++ *ip = (str - buf);
++ }
++ continue;
++
++ case '%':
++ if (str <= end)
++ *str = '%';
++ ++str;
++ continue;
++
++ /* integer number formats - set up the flags and "break" */
++ case 'o':
++ base = 8;
++ break;
++
++ case 'X':
++ flags |= _kc_LARGE;
++ case 'x':
++ base = 16;
++ break;
++
++ case 'd':
++ case 'i':
++ flags |= _kc_SIGN;
++ case 'u':
++ break;
++
++ default:
++ if (str <= end)
++ *str = '%';
++ ++str;
++ if (*fmt) {
++ if (str <= end)
++ *str = *fmt;
++ ++str;
++ } else {
++ --fmt;
++ }
++ continue;
++ }
++ if (qualifier == 'L')
++ num = va_arg(args, long long);
++ else if (qualifier == 'l') {
++ num = va_arg(args, unsigned long);
++ if (flags & _kc_SIGN)
++ num = (signed long) num;
++ } else if (qualifier == 'Z') {
++ num = va_arg(args, size_t);
++ } else if (qualifier == 'h') {
++ num = (unsigned short) va_arg(args, int);
++ if (flags & _kc_SIGN)
++ num = (signed short) num;
++ } else {
++ num = va_arg(args, unsigned int);
++ if (flags & _kc_SIGN)
++ num = (signed int) num;
++ }
++ str = number(str, end, num, base,
++ field_width, precision, flags);
++ }
++ if (str <= end)
++ *str = '\0';
++ else if (size > 0)
++ /* don't write out a null byte if the buf size is zero */
++ *end = '\0';
++ /* the trailing null byte doesn't count towards the total
++ * ++str;
++ */
++ return str-buf;
++}
++
++int _kc_snprintf(char * buf, size_t size, const char *fmt, ...)
++{
++ va_list args;
++ int i;
++
++ va_start(args, fmt);
++ i = _kc_vsnprintf(buf,size,fmt,args);
++ va_end(args);
++ return i;
++}
++#endif /* < 2.4.8 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
++
++/**************************************/
++/* PCI DMA MAPPING */
++
++#if defined(CONFIG_HIGHMEM)
++
++#ifndef PCI_DRAM_OFFSET
++#define PCI_DRAM_OFFSET 0
++#endif
++
++u64
++_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
++ size_t size, int direction)
++{
++ return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
++ PCI_DRAM_OFFSET);
++}
++
++#else /* CONFIG_HIGHMEM */
++
++u64
++_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
++ size_t size, int direction)
++{
++ return pci_map_single(dev, (void *)page_address(page) + offset, size,
++ direction);
++}
++
++#endif /* CONFIG_HIGHMEM */
++
++void
++_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
++ int direction)
++{
++ return pci_unmap_single(dev, dma_addr, size, direction);
++}
++
++#endif /* 2.4.13 => 2.4.3 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
++
++/**************************************/
++/* PCI DRIVER API */
++
++int
++_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
++{
++ if (!pci_dma_supported(dev, mask))
++ return -EIO;
++ dev->dma_mask = mask;
++ return 0;
++}
++
++int
++_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
++{
++ int i;
++
++ for (i = 0; i < 6; i++) {
++ if (pci_resource_len(dev, i) == 0)
++ continue;
++
++ if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
++ if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
++ pci_release_regions(dev);
++ return -EBUSY;
++ }
++ } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
++ if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
++ pci_release_regions(dev);
++ return -EBUSY;
++ }
++ }
++ }
++ return 0;
++}
++
++void
++_kc_pci_release_regions(struct pci_dev *dev)
++{
++ int i;
++
++ for (i = 0; i < 6; i++) {
++ if (pci_resource_len(dev, i) == 0)
++ continue;
++
++ if (pci_resource_flags(dev, i) & IORESOURCE_IO)
++ release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
++
++ else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
++ release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
++ }
++}
++
++/**************************************/
++/* NETWORK DRIVER API */
++
++struct net_device *
++_kc_alloc_etherdev(int sizeof_priv)
++{
++ struct net_device *dev;
++ int alloc_size;
++
++ alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
++ dev = kzalloc(alloc_size, GFP_KERNEL);
++ if (!dev)
++ return NULL;
++
++ if (sizeof_priv)
++ dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
++ dev->name[0] = '\0';
++ ether_setup(dev);
++
++ return dev;
++}
++
++int
++_kc_is_valid_ether_addr(u8 *addr)
++{
++ const char zaddr[6] = { 0, };
++
++ return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
++}
++
++#endif /* 2.4.3 => 2.4.0 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
++
++int
++_kc_pci_set_power_state(struct pci_dev *dev, int state)
++{
++ return 0;
++}
++
++int
++_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
++{
++ return 0;
++}
++
++#endif /* 2.4.6 => 2.4.3 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
++void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
++ int off, int size)
++{
++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++ frag->page = page;
++ frag->page_offset = off;
++ frag->size = size;
++ skb_shinfo(skb)->nr_frags = i + 1;
++}
++
++/*
++ * Original Copyright:
++ * find_next_bit.c: fallback find next bit implementation
++ *
++ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
++ * Written by David Howells (dhowells@redhat.com)
++ */
++
++/**
++ * find_next_bit - find the next set bit in a memory region
++ * @addr: The address to base the search on
++ * @offset: The bitnumber to start searching at
++ * @size: The maximum size to search
++ */
++unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
++ unsigned long offset)
++{
++ const unsigned long *p = addr + BITOP_WORD(offset);
++ unsigned long result = offset & ~(BITS_PER_LONG-1);
++ unsigned long tmp;
++
++ if (offset >= size)
++ return size;
++ size -= result;
++ offset %= BITS_PER_LONG;
++ if (offset) {
++ tmp = *(p++);
++ tmp &= (~0UL << offset);
++ if (size < BITS_PER_LONG)
++ goto found_first;
++ if (tmp)
++ goto found_middle;
++ size -= BITS_PER_LONG;
++ result += BITS_PER_LONG;
++ }
++ while (size & ~(BITS_PER_LONG-1)) {
++ if ((tmp = *(p++)))
++ goto found_middle;
++ result += BITS_PER_LONG;
++ size -= BITS_PER_LONG;
++ }
++ if (!size)
++ return result;
++ tmp = *p;
++
++found_first:
++ tmp &= (~0UL >> (BITS_PER_LONG - size));
++ if (tmp == 0UL) /* Are any bits set? */
++ return result + size; /* Nope. */
++found_middle:
++ return result + ffs(tmp);
++}
++
++size_t _kc_strlcpy(char *dest, const char *src, size_t size)
++{
++ size_t ret = strlen(src);
++
++ if (size) {
++ size_t len = (ret >= size) ? size - 1 : ret;
++ memcpy(dest, src, len);
++ dest[len] = '\0';
++ }
++ return ret;
++}
++
++#ifndef do_div
++#if BITS_PER_LONG == 32
++uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base)
++{
++ uint64_t rem = *n;
++ uint64_t b = base;
++ uint64_t res, d = 1;
++ uint32_t high = rem >> 32;
++
++ /* Reduce the thing a bit first */
++ res = 0;
++ if (high >= base) {
++ high /= base;
++ res = (uint64_t) high << 32;
++ rem -= (uint64_t) (high*base) << 32;
++ }
++
++ while ((int64_t)b > 0 && b < rem) {
++ b = b+b;
++ d = d+d;
++ }
++
++ do {
++ if (rem >= b) {
++ rem -= b;
++ res += d;
++ }
++ b >>= 1;
++ d >>= 1;
++ } while (d);
++
++ *n = res;
++ return rem;
++}
++#endif /* BITS_PER_LONG == 32 */
++#endif /* do_div */
++#endif /* 2.6.0 => 2.4.6 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
++int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...)
++{
++ va_list args;
++ int i;
++
++ va_start(args, fmt);
++ i = vsnprintf(buf, size, fmt, args);
++ va_end(args);
++ return (i >= size) ? (size - 1) : i;
++}
++#endif /* < 2.6.4 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
++DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
++#endif /* < 2.6.10 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
++char *_kc_kstrdup(const char *s, unsigned int gfp)
++{
++ size_t len;
++ char *buf;
++
++ if (!s)
++ return NULL;
++
++ len = strlen(s) + 1;
++ buf = kmalloc(len, gfp);
++ if (buf)
++ memcpy(buf, s, len);
++ return buf;
++}
++#endif /* < 2.6.13 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
++void *_kc_kzalloc(size_t size, int flags)
++{
++ void *ret = kmalloc(size, flags);
++ if (ret)
++ memset(ret, 0, size);
++ return ret;
++}
++#endif /* <= 2.6.13 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
++int _kc_skb_pad(struct sk_buff *skb, int pad)
++{
++ int ntail;
++
++ /* If the skbuff is non linear tailroom is always zero.. */
++ if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
++ memset(skb->data+skb->len, 0, pad);
++ return 0;
++ }
++
++ ntail = skb->data_len + pad - (skb->end - skb->tail);
++ if (likely(skb_cloned(skb) || ntail > 0)) {
++ if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
++ goto free_skb;
++ }
++
++#ifdef MAX_SKB_FRAGS
++ if (skb_is_nonlinear(skb) &&
++ !__pskb_pull_tail(skb, skb->data_len))
++ goto free_skb;
++
++#endif
++ memset(skb->data + skb->len, 0, pad);
++ return 0;
++
++free_skb:
++ kfree_skb(skb);
++ return -ENOMEM;
++}
++
++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
++int _kc_pci_save_state(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct adapter_struct *adapter = netdev_priv(netdev);
++ int size = PCI_CONFIG_SPACE_LEN, i;
++ u16 pcie_cap_offset, pcie_link_status;
++
++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
++ /* no ->dev for 2.4 kernels */
++ WARN_ON(pdev->dev.driver_data == NULL);
++#endif
++ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
++ if (pcie_cap_offset) {
++ if (!pci_read_config_word(pdev,
++ pcie_cap_offset + PCIE_LINK_STATUS,
++ &pcie_link_status))
++ size = PCIE_CONFIG_SPACE_LEN;
++ }
++ pci_config_space_ich8lan();
++#ifdef HAVE_PCI_ERS
++ if (adapter->config_space == NULL)
++#else
++ WARN_ON(adapter->config_space != NULL);
++#endif
++ adapter->config_space = kmalloc(size, GFP_KERNEL);
++ if (!adapter->config_space) {
++ printk(KERN_ERR "Out of memory in pci_save_state\n");
++ return -ENOMEM;
++ }
++ for (i = 0; i < (size / 4); i++)
++ pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
++ return 0;
++}
++
++void _kc_pci_restore_state(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct adapter_struct *adapter = netdev_priv(netdev);
++ int size = PCI_CONFIG_SPACE_LEN, i;
++ u16 pcie_cap_offset;
++ u16 pcie_link_status;
++
++ if (adapter->config_space != NULL) {
++ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
++ if (pcie_cap_offset &&
++ !pci_read_config_word(pdev,
++ pcie_cap_offset + PCIE_LINK_STATUS,
++ &pcie_link_status))
++ size = PCIE_CONFIG_SPACE_LEN;
++
++ pci_config_space_ich8lan();
++ for (i = 0; i < (size / 4); i++)
++ pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
++#ifndef HAVE_PCI_ERS
++ kfree(adapter->config_space);
++ adapter->config_space = NULL;
++#endif
++ }
++}
++#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
++
++#ifdef HAVE_PCI_ERS
++void _kc_free_netdev(struct net_device *netdev)
++{
++ struct adapter_struct *adapter = netdev_priv(netdev);
++
++ kfree(adapter->config_space);
++#ifdef CONFIG_SYSFS
++ if (netdev->reg_state == NETREG_UNINITIALIZED) {
++ kfree((char *)netdev - netdev->padded);
++ } else {
++ BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
++ netdev->reg_state = NETREG_RELEASED;
++ class_device_put(&netdev->class_dev);
++ }
++#else
++ kfree((char *)netdev - netdev->padded);
++#endif
++}
++#endif
++
++void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
++{
++ void *p;
++
++ p = kzalloc(len, gfp);
++ if (p)
++ memcpy(p, src, len);
++ return p;
++}
++#endif /* <= 2.6.19 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
++struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev)
++{
++ return ((struct adapter_struct *)netdev_priv(netdev))->pdev;
++}
++#endif /* < 2.6.21 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
++/* hexdump code taken from lib/hexdump.c */
++static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
++ int groupsize, unsigned char *linebuf,
++ size_t linebuflen, bool ascii)
++{
++ const u8 *ptr = buf;
++ u8 ch;
++ int j, lx = 0;
++ int ascii_column;
++
++ if (rowsize != 16 && rowsize != 32)
++ rowsize = 16;
++
++ if (!len)
++ goto nil;
++ if (len > rowsize) /* limit to one line at a time */
++ len = rowsize;
++ if ((len % groupsize) != 0) /* no mixed size output */
++ groupsize = 1;
++
++ switch (groupsize) {
++ case 8: {
++ const u64 *ptr8 = buf;
++ int ngroups = len / groupsize;
++
++ for (j = 0; j < ngroups; j++)
++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
++ "%s%16.16llx", j ? " " : "",
++ (unsigned long long)*(ptr8 + j));
++ ascii_column = 17 * ngroups + 2;
++ break;
++ }
++
++ case 4: {
++ const u32 *ptr4 = buf;
++ int ngroups = len / groupsize;
++
++ for (j = 0; j < ngroups; j++)
++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
++ "%s%8.8x", j ? " " : "", *(ptr4 + j));
++ ascii_column = 9 * ngroups + 2;
++ break;
++ }
++
++ case 2: {
++ const u16 *ptr2 = buf;
++ int ngroups = len / groupsize;
++
++ for (j = 0; j < ngroups; j++)
++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
++ "%s%4.4x", j ? " " : "", *(ptr2 + j));
++ ascii_column = 5 * ngroups + 2;
++ break;
++ }
++
++ default:
++ for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
++ ch = ptr[j];
++ linebuf[lx++] = hex_asc(ch >> 4);
++ linebuf[lx++] = hex_asc(ch & 0x0f);
++ linebuf[lx++] = ' ';
++ }
++ if (j)
++ lx--;
++
++ ascii_column = 3 * rowsize + 2;
++ break;
++ }
++ if (!ascii)
++ goto nil;
++
++ while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
++ linebuf[lx++] = ' ';
++ for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
++ linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
++ : '.';
++nil:
++ linebuf[lx++] = '\0';
++}
++
++void _kc_print_hex_dump(const char *level,
++ const char *prefix_str, int prefix_type,
++ int rowsize, int groupsize,
++ const void *buf, size_t len, bool ascii)
++{
++ const u8 *ptr = buf;
++ int i, linelen, remaining = len;
++ unsigned char linebuf[200];
++
++ if (rowsize != 16 && rowsize != 32)
++ rowsize = 16;
++
++ for (i = 0; i < len; i += rowsize) {
++ linelen = min(remaining, rowsize);
++ remaining -= rowsize;
++ _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
++ linebuf, sizeof(linebuf), ascii);
++
++ switch (prefix_type) {
++ case DUMP_PREFIX_ADDRESS:
++ printk("%s%s%*p: %s\n", level, prefix_str,
++ (int)(2 * sizeof(void *)), ptr + i, linebuf);
++ break;
++ case DUMP_PREFIX_OFFSET:
++ printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
++ break;
++ default:
++ printk("%s%s%s\n", level, prefix_str, linebuf);
++ break;
++ }
++ }
++}
++
++#ifdef HAVE_I2C_SUPPORT
++struct i2c_client *
++_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
++{
++ struct i2c_client *client;
++ int status;
++
++ client = kzalloc(sizeof *client, GFP_KERNEL);
++ if (!client)
++ return NULL;
++
++ client->adapter = adap;
++
++ client->dev.platform_data = info->platform_data;
++
++ client->flags = info->flags;
++ client->addr = info->addr;
++
++ strlcpy(client->name, info->type, sizeof(client->name));
++
++ /* Check for address business */
++ status = i2c_check_addr(adap, client->addr);
++ if (status)
++ goto out_err;
++
++ client->dev.parent = &client->adapter->dev;
++ client->dev.bus = &i2c_bus_type;
++
++ status = i2c_attach_client(client);
++ if (status)
++ goto out_err;
++
++ dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
++ client->name, dev_name(&client->dev));
++
++ return client;
++
++out_err:
++ dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x "
++ "(%d)\n", client->name, client->addr, status);
++ kfree(client);
++ return NULL;
++}
++#endif /* HAVE_I2C_SUPPORT */
++#endif /* < 2.6.22 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
++#ifdef NAPI
++struct net_device *napi_to_poll_dev(const struct napi_struct *napi)
++{
++ struct adapter_q_vector *q_vector = container_of(napi,
++ struct adapter_q_vector,
++ napi);
++ return &q_vector->poll_dev;
++}
++
++int __kc_adapter_clean(struct net_device *netdev, int *budget)
++{
++ int work_done;
++ int work_to_do = min(*budget, netdev->quota);
++ /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
++ struct napi_struct *napi = netdev->priv;
++ work_done = napi->poll(napi, work_to_do);
++ *budget -= work_done;
++ netdev->quota -= work_done;
++ return (work_done >= work_to_do) ? 1 : 0;
++}
++#endif /* NAPI */
++#endif /* <= 2.6.24 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
++void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
++{
++ struct pci_dev *parent = pdev->bus->self;
++ u16 link_state;
++ int pos;
++
++ if (!parent)
++ return;
++
++ pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
++ if (pos) {
++ pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
++ link_state &= ~state;
++ pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
++ }
++}
++#endif /* < 2.6.26 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
++#ifdef HAVE_TX_MQ
++void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
++{
++ struct adapter_struct *adapter = netdev_priv(netdev);
++ int i;
++
++ netif_stop_queue(netdev);
++ if (netif_is_multiqueue(netdev))
++ for (i = 0; i < adapter->num_tx_queues; i++)
++ netif_stop_subqueue(netdev, i);
++}
++void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
++{
++ struct adapter_struct *adapter = netdev_priv(netdev);
++ int i;
++
++ netif_wake_queue(netdev);
++ if (netif_is_multiqueue(netdev))
++ for (i = 0; i < adapter->num_tx_queues; i++)
++ netif_wake_subqueue(netdev, i);
++}
++void _kc_netif_tx_start_all_queues(struct net_device *netdev)
++{
++ struct adapter_struct *adapter = netdev_priv(netdev);
++ int i;
++
++ netif_start_queue(netdev);
++ if (netif_is_multiqueue(netdev))
++ for (i = 0; i < adapter->num_tx_queues; i++)
++ netif_start_subqueue(netdev, i);
++}
++#endif /* HAVE_TX_MQ */
++
++void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
++{
++ va_list args;
++
++ printk(KERN_WARNING "------------[ cut here ]------------\n");
++ printk(KERN_WARNING "WARNING: at %s:%d \n", file, line);
++ va_start(args, fmt);
++ vprintk(fmt, args);
++ va_end(args);
++
++ dump_stack();
++}
++#endif /* __VMKLNX__ */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
++
++int
++_kc_pci_prepare_to_sleep(struct pci_dev *dev)
++{
++ pci_power_t target_state;
++ int error;
++
++ target_state = pci_choose_state(dev, PMSG_SUSPEND);
++
++ pci_enable_wake(dev, target_state, true);
++
++ error = pci_set_power_state(dev, target_state);
++
++ if (error)
++ pci_enable_wake(dev, target_state, false);
++
++ return error;
++}
++
++int
++_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
++{
++ int err;
++
++ err = pci_enable_wake(dev, PCI_D3cold, enable);
++ if (err)
++ goto out;
++
++ err = pci_enable_wake(dev, PCI_D3hot, enable);
++
++out:
++ return err;
++}
++#endif /* < 2.6.28 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
++static void __kc_pci_set_master(struct pci_dev *pdev, bool enable)
++{
++ u16 old_cmd, cmd;
++
++ pci_read_config_word(pdev, PCI_COMMAND, &old_cmd);
++ if (enable)
++ cmd = old_cmd | PCI_COMMAND_MASTER;
++ else
++ cmd = old_cmd & ~PCI_COMMAND_MASTER;
++ if (cmd != old_cmd) {
++ dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n",
++ enable ? "enabling" : "disabling");
++ pci_write_config_word(pdev, PCI_COMMAND, cmd);
++ }
++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) )
++ pdev->is_busmaster = enable;
++#endif
++}
++
++void _kc_pci_clear_master(struct pci_dev *dev)
++{
++ __kc_pci_set_master(dev, false);
++}
++#endif /* < 2.6.29 */
++
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
++#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
++int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev)
++{
++ int num_vf = 0;
++#ifdef CONFIG_PCI_IOV
++ struct pci_dev *vfdev;
++
++ /* loop through all ethernet devices starting at PF dev */
++ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
++ while (vfdev) {
++ if (vfdev->is_virtfn && vfdev->physfn == dev)
++ num_vf++;
++
++ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
++ }
++
++#endif
++ return num_vf;
++}
++#endif /* RHEL_RELEASE_CODE */
++#endif /* < 2.6.34 */
++
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
++#ifdef HAVE_TX_MQ
++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
++#ifndef CONFIG_NETDEVICES_MULTIQUEUE
++int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
++{
++ unsigned int real_num = dev->real_num_tx_queues;
++ struct Qdisc *qdisc;
++ int i;
++
++ if (txq < 1 || txq > dev->num_tx_queues)
++ return -EINVAL;
++
++ else if (txq > real_num)
++ dev->real_num_tx_queues = txq;
++ else if (txq < real_num) {
++ dev->real_num_tx_queues = txq;
++ for (i = txq; i < dev->num_tx_queues; i++) {
++ qdisc = netdev_get_tx_queue(dev, i)->qdisc;
++ if (qdisc) {
++ spin_lock_bh(qdisc_lock(qdisc));
++ qdisc_reset(qdisc);
++ spin_unlock_bh(qdisc_lock(qdisc));
++ }
++ }
++ }
++
++ return 0;
++}
++#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
++#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
++#endif /* HAVE_TX_MQ */
++
++ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
++ const void __user *from, size_t count)
++{
++ loff_t pos = *ppos;
++ size_t res;
++
++ if (pos < 0)
++ return -EINVAL;
++ if (pos >= available || !count)
++ return 0;
++ if (count > available - pos)
++ count = available - pos;
++ res = copy_from_user(to + pos, from, count);
++ if (res == count)
++ return -EFAULT;
++ count -= res;
++ *ppos = pos + count;
++ return count;
++}
++
++#endif /* < 2.6.35 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
++static const u32 _kc_flags_dup_features =
++ (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
++
++u32 _kc_ethtool_op_get_flags(struct net_device *dev)
++{
++ return dev->features & _kc_flags_dup_features;
++}
++
++int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
++{
++ if (data & ~supported)
++ return -EINVAL;
++
++ dev->features = ((dev->features & ~_kc_flags_dup_features) |
++ (data & _kc_flags_dup_features));
++ return 0;
++}
++#endif /* < 2.6.36 */
++
++/******************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
++
++#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
++#endif /* < 2.6.39 */
++
++/******************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
++void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
++ int off, int size, unsigned int truesize)
++{
++ skb_fill_page_desc(skb, i, page, off, size);
++ skb->len += size;
++ skb->data_len += size;
++ skb->truesize += truesize;
++}
++
++#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
++int _kc_simple_open(struct inode *inode, struct file *file)
++{
++ if (inode->i_private)
++ file->private_data = inode->i_private;
++
++ return 0;
++}
++#endif /* SLE_VERSION < 11,3,0 */
++
++#endif /* < 3.4.0 */
++
++/******************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
++static inline int __kc_pcie_cap_version(struct pci_dev *dev)
++{
++ int pos;
++ u16 reg16;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
++ if (!pos)
++ return 0;
++	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
++ return reg16 & PCI_EXP_FLAGS_VERS;
++}
++
++static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev)
++{
++ return true;
++}
++
++static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev)
++{
++ int type = pci_pcie_type(dev);
++
++ return __kc_pcie_cap_version(dev) > 1 ||
++ type == PCI_EXP_TYPE_ROOT_PORT ||
++ type == PCI_EXP_TYPE_ENDPOINT ||
++ type == PCI_EXP_TYPE_LEG_END;
++}
++
++static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev)
++{
++ int type = pci_pcie_type(dev);
++ int pos;
++ u16 pcie_flags_reg;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
++ if (!pos)
++ return false;
++ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg);
++
++ return __kc_pcie_cap_version(dev) > 1 ||
++ type == PCI_EXP_TYPE_ROOT_PORT ||
++ (type == PCI_EXP_TYPE_DOWNSTREAM &&
++ pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
++}
++
++static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev)
++{
++ int type = pci_pcie_type(dev);
++
++ return __kc_pcie_cap_version(dev) > 1 ||
++ type == PCI_EXP_TYPE_ROOT_PORT ||
++ type == PCI_EXP_TYPE_RC_EC;
++}
++
++static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
++{
++ if (!pci_is_pcie(dev))
++ return false;
++
++ switch (pos) {
++ case PCI_EXP_FLAGS_TYPE:
++ return true;
++ case PCI_EXP_DEVCAP:
++ case PCI_EXP_DEVCTL:
++ case PCI_EXP_DEVSTA:
++ return __kc_pcie_cap_has_devctl(dev);
++ case PCI_EXP_LNKCAP:
++ case PCI_EXP_LNKCTL:
++ case PCI_EXP_LNKSTA:
++ return __kc_pcie_cap_has_lnkctl(dev);
++ case PCI_EXP_SLTCAP:
++ case PCI_EXP_SLTCTL:
++ case PCI_EXP_SLTSTA:
++ return __kc_pcie_cap_has_sltctl(dev);
++ case PCI_EXP_RTCTL:
++ case PCI_EXP_RTCAP:
++ case PCI_EXP_RTSTA:
++ return __kc_pcie_cap_has_rtctl(dev);
++ case PCI_EXP_DEVCAP2:
++ case PCI_EXP_DEVCTL2:
++ case PCI_EXP_LNKCAP2:
++ case PCI_EXP_LNKCTL2:
++ case PCI_EXP_LNKSTA2:
++ return __kc_pcie_cap_version(dev) > 1;
++ default:
++ return false;
++ }
++}
++
++/*
++ * Note that these accessor functions are only for the "PCI Express
++ * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
++ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
++ */
++int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
++{
++ int ret;
++
++ *val = 0;
++ if (pos & 1)
++ return -EINVAL;
++
++ if (__kc_pcie_capability_reg_implemented(dev, pos)) {
++ ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
++ /*
++ * Reset *val to 0 if pci_read_config_word() fails, it may
++ * have been written as 0xFFFF if hardware error happens
++ * during pci_read_config_word().
++ */
++ if (ret)
++ *val = 0;
++ return ret;
++ }
++
++ /*
++ * For Functions that do not implement the Slot Capabilities,
++ * Slot Status, and Slot Control registers, these spaces must
++ * be hardwired to 0b, with the exception of the Presence Detect
++ * State bit in the Slot Status register of Downstream Ports,
++ * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
++ */
++ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
++ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
++ *val = PCI_EXP_SLTSTA_PDS;
++ }
++
++ return 0;
++}
++
++int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
++{
++ if (pos & 1)
++ return -EINVAL;
++
++ if (!__kc_pcie_capability_reg_implemented(dev, pos))
++ return 0;
++
++ return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
++}
++
++int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
++ u16 clear, u16 set)
++{
++ int ret;
++ u16 val;
++
++ ret = __kc_pcie_capability_read_word(dev, pos, &val);
++ if (!ret) {
++ val &= ~clear;
++ val |= set;
++ ret = __kc_pcie_capability_write_word(dev, pos, val);
++ }
++
++ return ret;
++}
++
++int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos,
++ u16 clear)
++{
++ return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0);
++}
++#endif /* < 3.7.0 */
++
++/******************************************************************************
++ * ripped from linux/net/ipv6/exthdrs_core.c, GPL2, no direct copyright,
++ * inferred copyright from kernel
++ */
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
++int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
++ int target, unsigned short *fragoff, int *flags)
++{
++ unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
++ u8 nexthdr = ipv6_hdr(skb)->nexthdr;
++ unsigned int len;
++ bool found;
++
++#define __KC_IP6_FH_F_FRAG BIT(0)
++#define __KC_IP6_FH_F_AUTH BIT(1)
++#define __KC_IP6_FH_F_SKIP_RH BIT(2)
++
++ if (fragoff)
++ *fragoff = 0;
++
++ if (*offset) {
++ struct ipv6hdr _ip6, *ip6;
++
++ ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
++ if (!ip6 || (ip6->version != 6)) {
++ printk(KERN_ERR "IPv6 header not found\n");
++ return -EBADMSG;
++ }
++ start = *offset + sizeof(struct ipv6hdr);
++ nexthdr = ip6->nexthdr;
++ }
++ len = skb->len - start;
++
++ do {
++ struct ipv6_opt_hdr _hdr, *hp;
++ unsigned int hdrlen;
++ found = (nexthdr == target);
++
++ if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
++ if (target < 0 || found)
++ break;
++ return -ENOENT;
++ }
++
++ hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
++ if (!hp)
++ return -EBADMSG;
++
++ if (nexthdr == NEXTHDR_ROUTING) {
++ struct ipv6_rt_hdr _rh, *rh;
++
++ rh = skb_header_pointer(skb, start, sizeof(_rh),
++ &_rh);
++ if (!rh)
++ return -EBADMSG;
++
++ if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) &&
++ rh->segments_left == 0)
++ found = false;
++ }
++
++ if (nexthdr == NEXTHDR_FRAGMENT) {
++ unsigned short _frag_off;
++ __be16 *fp;
++
++ if (flags) /* Indicate that this is a fragment */
++ *flags |= __KC_IP6_FH_F_FRAG;
++ fp = skb_header_pointer(skb,
++ start+offsetof(struct frag_hdr,
++ frag_off),
++ sizeof(_frag_off),
++ &_frag_off);
++ if (!fp)
++ return -EBADMSG;
++
++ _frag_off = ntohs(*fp) & ~0x7;
++ if (_frag_off) {
++ if (target < 0 &&
++ ((!ipv6_ext_hdr(hp->nexthdr)) ||
++ hp->nexthdr == NEXTHDR_NONE)) {
++ if (fragoff)
++ *fragoff = _frag_off;
++ return hp->nexthdr;
++ }
++ return -ENOENT;
++ }
++ hdrlen = 8;
++ } else if (nexthdr == NEXTHDR_AUTH) {
++ if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0))
++ break;
++ hdrlen = (hp->hdrlen + 2) << 2;
++ } else
++ hdrlen = ipv6_optlen(hp);
++
++ if (!found) {
++ nexthdr = hp->nexthdr;
++ len -= hdrlen;
++ start += hdrlen;
++ }
++ } while (!found);
++
++ *offset = start;
++ return nexthdr;
++}
++#endif /* < 3.8.0 */
++
++/******************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
++#endif /* 3.9.0 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
++#ifdef HAVE_FDB_OPS
++#ifdef USE_CONST_DEV_UC_CHAR
++int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
++ struct net_device *dev, const unsigned char *addr,
++ u16 flags)
++#else
++int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
++ unsigned char *addr, u16 flags)
++#endif
++{
++ int err = -EINVAL;
++
++ /* If aging addresses are supported device will need to
++ * implement its own handler for this.
++ */
++ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
++ pr_info("%s: FDB only supports static addresses\n", dev->name);
++ return err;
++ }
++
++ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
++ err = dev_uc_add_excl(dev, addr);
++ else if (is_multicast_ether_addr(addr))
++ err = dev_mc_add_excl(dev, addr);
++
++ /* Only return duplicate errors if NLM_F_EXCL is set */
++ if (err == -EEXIST && !(flags & NLM_F_EXCL))
++ err = 0;
++
++ return err;
++}
++
++#ifdef USE_CONST_DEV_UC_CHAR
++#ifdef HAVE_FDB_DEL_NLATTR
++int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
++ struct net_device *dev, const unsigned char *addr)
++#else
++int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
++ const unsigned char *addr)
++#endif
++#else
++int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
++ unsigned char *addr)
++#endif
++{
++ int err = -EINVAL;
++
++ /* If aging addresses are supported device will need to
++ * implement its own handler for this.
++ */
++ if (!(ndm->ndm_state & NUD_PERMANENT)) {
++ pr_info("%s: FDB only supports static addresses\n", dev->name);
++ return err;
++ }
++
++ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
++ err = dev_uc_del(dev, addr);
++ else if (is_multicast_ether_addr(addr))
++ err = dev_mc_del(dev, addr);
++
++ return err;
++}
++
++#endif /* HAVE_FDB_OPS */
++#ifdef CONFIG_PCI_IOV
++int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev)
++{
++ unsigned int vfs_assigned = 0;
++#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
++ int pos;
++ struct pci_dev *vfdev;
++ unsigned short dev_id;
++
++ /* only search if we are a PF */
++ if (!dev->is_physfn)
++ return 0;
++
++ /* find SR-IOV capability */
++ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
++ if (!pos)
++ return 0;
++
++ /*
++ * determine the device ID for the VFs, the vendor ID will be the
++ * same as the PF so there is no need to check for that one
++ */
++ pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
++
++ /* loop through all the VFs to see if we own any that are assigned */
++ vfdev = pci_get_device(dev->vendor, dev_id, NULL);
++ while (vfdev) {
++ /*
++ * It is considered assigned if it is a virtual function with
++ * our dev as the physical function and the assigned bit is set
++ */
++ if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
++ (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
++ vfs_assigned++;
++
++ vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
++ }
++
++#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
++ return vfs_assigned;
++}
++
++#endif /* CONFIG_PCI_IOV */
++#endif /* 3.10.0 */
++
++/*****************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
++const unsigned char pcie_link_speed[] = {
++ PCI_SPEED_UNKNOWN, /* 0 */
++ PCIE_SPEED_2_5GT, /* 1 */
++ PCIE_SPEED_5_0GT, /* 2 */
++ PCIE_SPEED_8_0GT, /* 3 */
++ PCI_SPEED_UNKNOWN, /* 4 */
++ PCI_SPEED_UNKNOWN, /* 5 */
++ PCI_SPEED_UNKNOWN, /* 6 */
++ PCI_SPEED_UNKNOWN, /* 7 */
++ PCI_SPEED_UNKNOWN, /* 8 */
++ PCI_SPEED_UNKNOWN, /* 9 */
++ PCI_SPEED_UNKNOWN, /* A */
++ PCI_SPEED_UNKNOWN, /* B */
++ PCI_SPEED_UNKNOWN, /* C */
++ PCI_SPEED_UNKNOWN, /* D */
++ PCI_SPEED_UNKNOWN, /* E */
++ PCI_SPEED_UNKNOWN /* F */
++};
++
++int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
++ enum pcie_link_width *width)
++{
++ int ret;
++
++ *speed = PCI_SPEED_UNKNOWN;
++ *width = PCIE_LNK_WIDTH_UNKNOWN;
++
++ while (dev) {
++ u16 lnksta;
++ enum pci_bus_speed next_speed;
++ enum pcie_link_width next_width;
++
++ ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
++ if (ret)
++ return ret;
++
++ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
++ next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
++ PCI_EXP_LNKSTA_NLW_SHIFT;
++
++ if (next_speed < *speed)
++ *speed = next_speed;
++
++ if (next_width < *width)
++ *width = next_width;
++
++ dev = dev->bus->self;
++ }
++
++ return 0;
++}
++
++#endif
++
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) )
++int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask)
++{
++ int err = dma_set_mask(dev, mask);
++
++ if (!err)
++ /* coherent mask for the same size will always succeed if
++ * dma_set_mask does. However we store the error anyways, due
++ * to some kernels which use gcc's warn_unused_result on their
++ * definition of dma_set_coherent_mask.
++ */
++ err = dma_set_coherent_mask(dev, mask);
++ return err;
++}
++
++void __kc_netdev_rss_key_fill(void *buffer, size_t len)
++{
++ /* Set of random keys generated using kernel random number generator */
++ static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62,
++ 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F,
++ 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95,
++ 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC,
++ 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41,
++ 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A,
++ 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20};
++
++ BUG_ON(len > NETDEV_RSS_KEY_LEN);
++ memcpy(buffer, seed, len);
++}
++#endif /* 3.13.0 */
++
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
++int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
++ int minvec, int maxvec)
++{
++ int nvec = maxvec;
++ int rc;
++
++ if (maxvec < minvec)
++ return -ERANGE;
++
++ do {
++ rc = pci_enable_msix(dev, entries, nvec);
++ if (rc < 0) {
++ return rc;
++ } else if (rc > 0) {
++ if (rc < minvec)
++ return -ENOSPC;
++ nvec = rc;
++ }
++ } while (rc);
++
++ return nvec;
++}
++#endif /* 3.14.0 */
++
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) )
++#ifdef HAVE_SET_RX_MODE
++#ifdef NETDEV_HW_ADDR_T_UNICAST
++int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list,
++ struct net_device *dev,
++ int (*sync)(struct net_device *, const unsigned char *),
++ int (*unsync)(struct net_device *, const unsigned char *))
++{
++ struct netdev_hw_addr *ha, *tmp;
++ int err;
++
++ /* first go through and flush out any stale entries */
++ list_for_each_entry_safe(ha, tmp, &list->list, list) {
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
++ if (!ha->synced || ha->refcount != 1)
++#else
++ if (!ha->sync_cnt || ha->refcount != 1)
++#endif
++ continue;
++
++ if (unsync && unsync(dev, ha->addr))
++ continue;
++
++ list_del_rcu(&ha->list);
++ kfree_rcu(ha, rcu_head);
++ list->count--;
++ }
++
++ /* go through and sync new entries to the list */
++ list_for_each_entry_safe(ha, tmp, &list->list, list) {
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
++ if (ha->synced)
++#else
++ if (ha->sync_cnt)
++#endif
++ continue;
++
++ err = sync(dev, ha->addr);
++ if (err)
++ return err;
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
++ ha->synced = true;
++#else
++ ha->sync_cnt++;
++#endif
++ ha->refcount++;
++ }
++
++ return 0;
++}
++
++void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
++ struct net_device *dev,
++ int (*unsync)(struct net_device *, const unsigned char *))
++{
++ struct netdev_hw_addr *ha, *tmp;
++
++ list_for_each_entry_safe(ha, tmp, &list->list, list) {
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
++ if (!ha->synced)
++#else
++ if (!ha->sync_cnt)
++#endif
++ continue;
++
++ if (unsync && unsync(dev, ha->addr))
++ continue;
++
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
++ ha->synced = false;
++#else
++ ha->sync_cnt--;
++#endif
++ if (--ha->refcount)
++ continue;
++
++ list_del_rcu(&ha->list);
++ kfree_rcu(ha, rcu_head);
++ list->count--;
++ }
++}
++
++#endif /* NETDEV_HW_ADDR_T_UNICAST */
++#ifndef NETDEV_HW_ADDR_T_MULTICAST
++int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count,
++ struct net_device *dev,
++ int (*sync)(struct net_device *, const unsigned char *),
++ int (*unsync)(struct net_device *, const unsigned char *))
++{
++ struct dev_addr_list *da, **next = list;
++ int err;
++
++ /* first go through and flush out any stale entries */
++ while ((da = *next) != NULL) {
++ if (da->da_synced && da->da_users == 1) {
++ if (!unsync || !unsync(dev, da->da_addr)) {
++ *next = da->next;
++ kfree(da);
++ (*count)--;
++ continue;
++ }
++ }
++ next = &da->next;
++ }
++
++ /* go through and sync new entries to the list */
++ for (da = *list; da != NULL; da = da->next) {
++ if (da->da_synced)
++ continue;
++
++ err = sync(dev, da->da_addr);
++ if (err)
++ return err;
++
++ da->da_synced++;
++ da->da_users++;
++ }
++
++ return 0;
++}
++
++void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
++ struct net_device *dev,
++ int (*unsync)(struct net_device *, const unsigned char *))
++{
++ struct dev_addr_list *da;
++
++ while ((da = *list) != NULL) {
++ if (da->da_synced) {
++ if (!unsync || !unsync(dev, da->da_addr)) {
++ da->da_synced--;
++ if (--da->da_users == 0) {
++ *list = da->next;
++ kfree(da);
++ (*count)--;
++ continue;
++ }
++ }
++ }
++ list = &da->next;
++ }
++}
++#endif /* NETDEV_HW_ADDR_T_MULTICAST */
++#endif /* HAVE_SET_RX_MODE */
++#endif /* 3.16.0 */
++
++/******************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) )
++#ifndef NO_PTP_SUPPORT
++static void __kc_sock_efree(struct sk_buff *skb)
++{
++ sock_put(skb->sk);
++}
++
++struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb)
++{
++ struct sock *sk = skb->sk;
++ struct sk_buff *clone;
++
++ if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
++ return NULL;
++
++ clone = skb_clone(skb, GFP_ATOMIC);
++ if (!clone) {
++ sock_put(sk);
++ return NULL;
++ }
++
++ clone->sk = sk;
++ clone->destructor = __kc_sock_efree;
++
++ return clone;
++}
++
++void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
++ struct skb_shared_hwtstamps *hwtstamps)
++{
++ struct sock_exterr_skb *serr;
++ struct sock *sk = skb->sk;
++ int err;
++
++ sock_hold(sk);
++
++ *skb_hwtstamps(skb) = *hwtstamps;
++
++ serr = SKB_EXT_ERR(skb);
++ memset(serr, 0, sizeof(*serr));
++ serr->ee.ee_errno = ENOMSG;
++ serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
++
++ err = sock_queue_err_skb(sk, skb);
++ if (err)
++ kfree_skb(skb);
++
++ sock_put(sk);
++}
++#endif
++
++/* include headers needed for get_headlen function */
++#ifdef HAVE_SCTP
++#include
++#endif
++
++unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len)
++{
++ union {
++ unsigned char *network;
++ /* l2 headers */
++ struct ethhdr *eth;
++ struct vlan_hdr *vlan;
++ /* l3 headers */
++ struct iphdr *ipv4;
++ struct ipv6hdr *ipv6;
++ } hdr;
++ __be16 proto;
++ u8 nexthdr = 0; /* default to not TCP */
++ u8 hlen;
++
++ /* this should never happen, but better safe than sorry */
++ if (max_len < ETH_HLEN)
++ return max_len;
++
++ /* initialize network frame pointer */
++ hdr.network = data;
++
++ /* set first protocol and move network header forward */
++ proto = hdr.eth->h_proto;
++ hdr.network += ETH_HLEN;
++
++again:
++ switch (proto) {
++ /* handle any vlan tag if present */
++ case __constant_htons(ETH_P_8021AD):
++ case __constant_htons(ETH_P_8021Q):
++ if ((hdr.network - data) > (max_len - VLAN_HLEN))
++ return max_len;
++
++ proto = hdr.vlan->h_vlan_encapsulated_proto;
++ hdr.network += VLAN_HLEN;
++ goto again;
++ /* handle L3 protocols */
++ case __constant_htons(ETH_P_IP):
++ if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
++ return max_len;
++
++ /* access ihl as a u8 to avoid unaligned access on ia64 */
++ hlen = (hdr.network[0] & 0x0F) << 2;
++
++ /* verify hlen meets minimum size requirements */
++ if (hlen < sizeof(struct iphdr))
++ return hdr.network - data;
++
++ /* record next protocol if header is present */
++ if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
++ nexthdr = hdr.ipv4->protocol;
++
++ hdr.network += hlen;
++ break;
++#ifdef NETIF_F_TSO6
++ case __constant_htons(ETH_P_IPV6):
++ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
++ return max_len;
++
++ /* record next protocol */
++ nexthdr = hdr.ipv6->nexthdr;
++ hdr.network += sizeof(struct ipv6hdr);
++ break;
++#endif /* NETIF_F_TSO6 */
++ default:
++ return hdr.network - data;
++ }
++
++ /* finally sort out L4 */
++ switch (nexthdr) {
++ case IPPROTO_TCP:
++ if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
++ return max_len;
++
++ /* access doff as a u8 to avoid unaligned access on ia64 */
++ hdr.network += max_t(u8, sizeof(struct tcphdr),
++ (hdr.network[12] & 0xF0) >> 2);
++
++ break;
++ case IPPROTO_UDP:
++ case IPPROTO_UDPLITE:
++ hdr.network += sizeof(struct udphdr);
++ break;
++#ifdef HAVE_SCTP
++ case IPPROTO_SCTP:
++ hdr.network += sizeof(struct sctphdr);
++ break;
++#endif
++ }
++
++ /*
++ * If everything has gone correctly hdr.network should be the
++ * data section of the packet and will be the end of the header.
++ * If not then it probably represents the end of the last recognized
++ * header.
++ */
++ return min_t(unsigned int, hdr.network - data, max_len);
++}
++
++#endif /* < 3.18.0 */
++
++/******************************************************************************/
++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
++#ifdef HAVE_NET_GET_RANDOM_ONCE
++static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN];
++
++void __kc_netdev_rss_key_fill(void *buffer, size_t len)
++{
++ BUG_ON(len > sizeof(__kc_netdev_rss_key));
++ net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key));
++ memcpy(buffer, __kc_netdev_rss_key, len);
++}
++#endif
++#endif
+diff -Nu a/drivers/net/ethernet/intel/igb/kcompat.h b/drivers/net/ethernet/intel/igb/kcompat.h
+--- a/drivers/net/ethernet/intel/igb/kcompat.h 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/net/ethernet/intel/igb/kcompat.h 2016-11-14 14:32:08.583567168 +0000
+@@ -0,0 +1,5071 @@
++/*******************************************************************************
++
++ Intel(R) Gigabit Ethernet Linux driver
++ Copyright(c) 2007-2015 Intel Corporation.
++
++ This program is free software; you can redistribute it and/or modify it
++ under the terms and conditions of the GNU General Public License,
++ version 2, as published by the Free Software Foundation.
++
++ This program is distributed in the hope it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ more details.
++
++ The full GNU General Public License is included in this distribution in
++ the file called "COPYING".
++
++ Contact Information:
++ Linux NICS
++ e1000-devel Mailing List
++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++
++*******************************************************************************/
++
++#ifndef _KCOMPAT_H_
++#define _KCOMPAT_H_
++
++#ifndef LINUX_VERSION_CODE
++#include
++#else
++#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
++#endif
++#include
++#include
++#include
++#include
++#include
++#include
++#include